[ARM] move include/asm-arm to arch/arm/include/asm

Move platform-independent header files to arch/arm/include/asm, leaving
those in asm/arch* and asm/plat* alone.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
new file mode 100644
index 0000000..73237bd
--- /dev/null
+++ b/arch/arm/include/asm/Kbuild
@@ -0,0 +1,3 @@
+include include/asm-generic/Kbuild.asm
+
+unifdef-y += hwcap.h
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
new file mode 100644
index 0000000..93d04ac
--- /dev/null
+++ b/arch/arm/include/asm/a.out-core.h
@@ -0,0 +1,49 @@
+/* a.out coredump register dumper
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_A_OUT_CORE_H
+#define _ASM_A_OUT_CORE_H
+
+#ifdef __KERNEL__
+
+#include <linux/user.h>
+#include <linux/elfcore.h>
+
+/*
+ * fill in the user structure for an a.out core dump
+ */
+static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
+{
+	struct task_struct *tsk = current;
+
+	dump->magic = CMAGIC;
+	dump->start_code = tsk->mm->start_code;
+	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+
+	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
+	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	dump->u_ssize = 0;
+
+	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
+	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
+	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
+	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
+	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+
+	if (dump->start_stack < 0x04000000)
+		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+
+	dump->regs = *regs;
+	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h
new file mode 100644
index 0000000..79489fd
--- /dev/null
+++ b/arch/arm/include/asm/a.out.h
@@ -0,0 +1,34 @@
+#ifndef __ARM_A_OUT_H__
+#define __ARM_A_OUT_H__
+
+#include <linux/personality.h>
+#include <asm/types.h>
+
+struct exec
+{
+  __u32 a_info;		/* Use macros N_MAGIC, etc for access */
+  __u32 a_text;		/* length of text, in bytes */
+  __u32 a_data;		/* length of data, in bytes */
+  __u32 a_bss;		/* length of uninitialized data area for file, in bytes */
+  __u32 a_syms;		/* length of symbol table data in file, in bytes */
+  __u32 a_entry;	/* start address */
+  __u32 a_trsize;	/* length of relocation info for text, in bytes */
+  __u32 a_drsize;	/* length of relocation info for data, in bytes */
+};
+
+/*
+ * This is always the same
+ */
+#define N_TXTADDR(a)	(0x00008000)
+
+#define N_TRSIZE(a)	((a).a_trsize)
+#define N_DRSIZE(a)	((a).a_drsize)
+#define N_SYMSIZE(a)	((a).a_syms)
+
+#define M_ARM 103
+
+#ifndef LIBRARY_START_TEXT
+#define LIBRARY_START_TEXT	(0x00c00000)
+#endif
+
+#endif /* __ARM_A_OUT_H__ */
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
new file mode 100644
index 0000000..6116e48
--- /dev/null
+++ b/arch/arm/include/asm/assembler.h
@@ -0,0 +1,116 @@
+/*
+ *  arch/arm/include/asm/assembler.h
+ *
+ *  Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file contains arm architecture specific defines
+ *  for the different processors.
+ *
+ *  Do not include any C declarations in this file - it is included by
+ *  assembler source.
+ */
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define pull            lsr
+#define push            lsl
+#define get_byte_0      lsl #0
+#define get_byte_1	lsr #8
+#define get_byte_2	lsr #16
+#define get_byte_3	lsr #24
+#define put_byte_0      lsl #0
+#define put_byte_1	lsl #8
+#define put_byte_2	lsl #16
+#define put_byte_3	lsl #24
+#else
+#define pull            lsl
+#define push            lsr
+#define get_byte_0	lsr #24
+#define get_byte_1	lsr #16
+#define get_byte_2	lsr #8
+#define get_byte_3      lsl #0
+#define put_byte_0	lsl #24
+#define put_byte_1	lsl #16
+#define put_byte_2	lsl #8
+#define put_byte_3      lsl #0
+#endif
+
+/*
+ * Data preload for architectures that support it
+ */
+#if __LINUX_ARM_ARCH__ >= 5
+#define PLD(code...)	code
+#else
+#define PLD(code...)
+#endif
+
+/*
+ * This can be used to enable code to cacheline align the destination
+ * pointer when bulk writing to memory.  Experiments on StrongARM and
+ * XScale didn't show this a worthwhile thing to do when the cache is not
+ * set to write-allocate (this would need further testing on XScale when WA
+ * is used).
+ *
+ * On Feroceon there is much to gain however, regardless of cache mode.
+ */
+#ifdef CONFIG_CPU_FEROCEON
+#define CALGN(code...) code
+#else
+#define CALGN(code...)
+#endif
+
+/*
+ * Enable and disable interrupts
+ */
+#if __LINUX_ARM_ARCH__ >= 6
+	.macro	disable_irq
+	cpsid	i
+	.endm
+
+	.macro	enable_irq
+	cpsie	i
+	.endm
+#else
+	.macro	disable_irq
+	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
+	.endm
+
+	.macro	enable_irq
+	msr	cpsr_c, #SVC_MODE
+	.endm
+#endif
+
+/*
+ * Save the current IRQ state and disable IRQs.  Note that this macro
+ * assumes FIQs are enabled, and that the processor is in SVC mode.
+ */
+	.macro	save_and_disable_irqs, oldcpsr
+	mrs	\oldcpsr, cpsr
+	disable_irq
+	.endm
+
+/*
+ * Restore interrupt state previously stored in a register.  We don't
+ * guarantee that this will preserve the flags.
+ */
+	.macro	restore_irqs, oldcpsr
+	msr	cpsr_c, \oldcpsr
+	.endm
+
+#define USER(x...)				\
+9999:	x;					\
+	.section __ex_table,"a";		\
+	.align	3;				\
+	.long	9999b,9001f;			\
+	.previous
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
new file mode 100644
index 0000000..325f881
--- /dev/null
+++ b/arch/arm/include/asm/atomic.h
@@ -0,0 +1,212 @@
+/*
+ *  arch/arm/include/asm/atomic.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <asm/system.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+#define atomic_read(v)	((v)->counter)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/*
+ * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.  Writing to 'v->counter'
+ * without using the following operations WILL break the atomic
+ * nature of these ops.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic_set\n"
+"1:	ldrex	%0, [%1]\n"
+"	strex	%0, %2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	add	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	unsigned long oldval, res;
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex	%1, [%2]\n"
+		"mov	%0, #0\n"
+		"teq	%1, %3\n"
+		"strexeq %0, %4, [%2]\n"
+		    : "=&r" (res), "=&r" (oldval)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__("@ atomic_clear_mask\n"
+"1:	ldrex	%0, [%2]\n"
+"	bic	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+#else /* ARM_ARCH_6 */
+
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	raw_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	*addr &= ~mask;
+	raw_local_irq_restore(flags);
+}
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#include <asm-generic/atomic.h>
+#endif
+#endif
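
As a usage illustration of the interface above, here is a minimal reference-count
sketch.  struct my_obj, my_obj_get() and my_obj_put() are hypothetical names, not
part of this patch; only the atomic_* calls come from the header itself.

/* Illustrative sketch only: a simple reference count built on atomic_t. */
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcnt;	/* hypothetical object, initialised to 1 */
};

static int my_obj_get(struct my_obj *obj)
{
	/* returns 0 if the count had already dropped to zero */
	return atomic_inc_not_zero(&obj->refcnt);
}

static void my_obj_put(struct my_obj *obj)
{
	/* atomic_dec_and_test() is true only for the final reference */
	if (atomic_dec_and_test(&obj->refcnt))
		kfree(obj);
}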
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 0000000..c0536f6
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __ASMARM_AUXVEC_H
+#define __ASMARM_AUXVEC_H
+
+#endif
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
new file mode 100644
index 0000000..9a1db20
--- /dev/null
+++ b/arch/arm/include/asm/bitops.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright 1995, Russell King.
+ * Various bits and pieces copyrights include:
+ *  Linus Torvalds (test_bit).
+ * Big endian support: Copyright 2001, Nicolas Pitre
+ *  reworked by rmk.
+ *
+ * bit 0 is the LSB of an "unsigned long" quantity.
+ *
+ * Please note that the code in this file should never be included
+ * from user space.  Many of these are not implemented in assembler
+ * since they would be too costly.  Also, they require privileged
+ * instructions (which are not available from user mode) to ensure
+ * that they are atomic.
+ */
+
+#ifndef __ASM_ARM_BITOPS_H
+#define __ASM_ARM_BITOPS_H
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/system.h>
+
+#define smp_mb__before_clear_bit()	mb()
+#define smp_mb__after_clear_bit()	mb()
+
+/*
+ * These functions are the basis of our bit ops.
+ *
+ * First, the atomic bitops. These use native endian.
+ */
+static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	*p |= mask;
+	raw_local_irq_restore(flags);
+}
+
+static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	*p &= ~mask;
+	raw_local_irq_restore(flags);
+}
+
+static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	*p ^= mask;
+	raw_local_irq_restore(flags);
+}
+
+static inline int
+____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	res = *p;
+	*p = res | mask;
+	raw_local_irq_restore(flags);
+
+	return res & mask;
+}
+
+static inline int
+____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	res = *p;
+	*p = res & ~mask;
+	raw_local_irq_restore(flags);
+
+	return res & mask;
+}
+
+static inline int
+____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	raw_local_irq_save(flags);
+	res = *p;
+	*p = res ^ mask;
+	raw_local_irq_restore(flags);
+
+	return res & mask;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+/*
+ *  A note about Endian-ness.
+ *  -------------------------
+ *
+ * When the ARM is put into big endian mode via CR15, the processor
+ * merely swaps the order of bytes within words, thus:
+ *
+ *          ------------ physical data bus bits -----------
+ *          D31 ... D24  D23 ... D16  D15 ... D8  D7 ... D0
+ * little     byte 3       byte 2       byte 1      byte 0
+ * big        byte 0       byte 1       byte 2      byte 3
+ *
+ * This means that reading a 32-bit word at address 0 returns the same
+ * value irrespective of the endian mode bit.
+ *
+ * Peripheral devices should be connected with the data bus reversed in
+ * "Big Endian" mode.  ARM Application Note 61 is applicable, and is
+ * available from http://www.arm.com/.
+ *
+ * The following assumes that the data bus connectivity for big endian
+ * mode has been followed.
+ *
+ * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
+ */
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ */
+extern void _set_bit_le(int nr, volatile unsigned long * p);
+extern void _clear_bit_le(int nr, volatile unsigned long * p);
+extern void _change_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
+ */
+extern void _set_bit_be(int nr, volatile unsigned long * p);
+extern void _clear_bit_be(int nr, volatile unsigned long * p);
+extern void _change_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
+extern int _find_first_zero_bit_be(const void * p, unsigned size);
+extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+
+#ifndef CONFIG_SMP
+/*
+ * The __* form of bitops are non-atomic and may be reordered.
+ */
+#define	ATOMIC_BITOP_LE(name,nr,p)		\
+	(__builtin_constant_p(nr) ?		\
+	 ____atomic_##name(nr, p) :		\
+	 _##name##_le(nr,p))
+
+#define	ATOMIC_BITOP_BE(name,nr,p)		\
+	(__builtin_constant_p(nr) ?		\
+	 ____atomic_##name(nr, p) :		\
+	 _##name##_be(nr,p))
+#else
+#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
+#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#endif
+
+#define NONATOMIC_BITOP(name,nr,p)		\
+	(____nonatomic_##name(nr, p))
+
+#ifndef __ARMEB__
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off)
+
+#define WORD_BITOFF_TO_LE(x)		((x))
+
+#else
+
+/*
+ * These are the big endian, atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off)
+
+#define WORD_BITOFF_TO_LE(x)		((x) ^ 0x18)
+
+#endif
+
+#if __LINUX_ARM_ARCH__ < 5
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
+
+#else
+
+static inline int constant_fls(int x)
+{
+	int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+/*
+ * On ARMv5 and above those functions can be implemented around
+ * the clz instruction for much better code efficiency.
+ */
+
+#define __fls(x) \
+	( __builtin_constant_p(x) ? constant_fls(x) : \
+	  ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
+
+/* Implement fls() in C so that 64-bit args are suitably truncated */
+static inline int fls(int x)
+{
+	return __fls(x);
+}
+
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+#define __ffs(x) (ffs(x) - 1)
+#define ffz(x) __ffs( ~(x) )
+
+#endif
+
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+/*
+ * Ext2 is defined to use little-endian byte ordering.
+ * These do not need to be atomic.
+ */
+#define ext2_set_bit(nr,p)			\
+		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_set_bit_atomic(lock,nr,p)          \
+                test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_clear_bit(nr,p)			\
+		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_clear_bit_atomic(lock,nr,p)        \
+                test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_test_bit(nr,p)			\
+		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define ext2_find_first_zero_bit(p,sz)		\
+		_find_first_zero_bit_le(p,sz)
+#define ext2_find_next_zero_bit(p,sz,off)	\
+		_find_next_zero_bit_le(p,sz,off)
+#define ext2_find_next_bit(p, sz, off) \
+		_find_next_bit_le(p, sz, off)
+
+/*
+ * Minix is defined to use little-endian byte ordering.
+ * These do not need to be atomic.
+ */
+#define minix_set_bit(nr,p)			\
+		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define minix_test_bit(nr,p)			\
+		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define minix_test_and_set_bit(nr,p)		\
+		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define minix_test_and_clear_bit(nr,p)		\
+		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
+#define minix_find_first_zero_bit(p,sz)		\
+		_find_first_zero_bit_le(p,sz)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARM_BITOPS_H */
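
As a usage sketch of the atomic bitops declared above, consider a driver-private
bitmap of busy channels.  NR_CHANNELS, channel_map and the two helpers are
illustrative assumptions, not part of this patch.

/* Illustrative sketch only: channel allocation with the atomic bitops. */
#include <linux/bitops.h>
#include <linux/errno.h>

#define NR_CHANNELS	32

static unsigned long channel_map[BITS_TO_LONGS(NR_CHANNELS)];

static int alloc_channel(void)
{
	int ch;

	do {
		ch = find_first_zero_bit(channel_map, NR_CHANNELS);
		if (ch >= NR_CHANNELS)
			return -EBUSY;
		/* test_and_set_bit() is atomic; losing a race simply retries */
	} while (test_and_set_bit(ch, channel_map));

	return ch;
}

static void free_channel(int ch)
{
	clear_bit(ch, channel_map);
}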
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
new file mode 100644
index 0000000..7b62351
--- /dev/null
+++ b/arch/arm/include/asm/bug.h
@@ -0,0 +1,24 @@
+#ifndef _ASMARM_BUG_H
+#define _ASMARM_BUG_H
+
+
+#ifdef CONFIG_BUG
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+extern void __bug(const char *file, int line) __attribute__((noreturn));
+
+/* give file/line information */
+#define BUG()		__bug(__FILE__, __LINE__)
+
+#else
+
+/* this just causes an oops */
+#define BUG()		(*(int *)0 = 0)
+
+#endif
+
+#define HAVE_ARCH_BUG
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
new file mode 100644
index 0000000..a97f1ea
--- /dev/null
+++ b/arch/arm/include/asm/bugs.h
@@ -0,0 +1,21 @@
+/*
+ *  arch/arm/include/asm/bugs.h
+ *
+ *  Copyright (C) 1995-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_BUGS_H
+#define __ASM_BUGS_H
+
+#ifdef CONFIG_MMU
+extern void check_writebuffer_bugs(void);
+
+#define check_bugs() check_writebuffer_bugs()
+#else
+#define check_bugs() do { } while (0)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h
new file mode 100644
index 0000000..4fbfb22
--- /dev/null
+++ b/arch/arm/include/asm/byteorder.h
@@ -0,0 +1,58 @@
+/*
+ *  arch/arm/include/asm/byteorder.h
+ *
+ * ARM Endian-ness.  In little endian mode, the data bus is connected such
+ * that byte accesses appear as:
+ *  0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31
+ * and word accesses (data or instruction) appear as:
+ *  d0...d31
+ *
+ * When in big endian mode, byte accesses appear as:
+ *  0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7
+ * and word accesses (data or instruction) appear as:
+ *  d0...d31
+ */
+#ifndef __ASM_ARM_BYTEORDER_H
+#define __ASM_ARM_BYTEORDER_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+	__u32 t;
+
+#ifndef __thumb__
+	if (!__builtin_constant_p(x)) {
+		/*
+		 * The compiler needs a bit of a hint here to always do the
+		 * right thing and not screw it up to different degrees
+		 * depending on the gcc version.
+		 */
+		asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
+	} else
+#endif
+		t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */
+
+	x = (x << 24) | (x >> 8);		/* mov r0,r0,ror #8      */
+	t &= ~0x00FF0000;			/* bic r1,r1,#0x00FF0000 */
+	x ^= (t >> 8);				/* eor r0,r0,r1,lsr #8   */
+
+	return x;
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __ARMEB__
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif
+
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
new file mode 100644
index 0000000..cb7a9e9
--- /dev/null
+++ b/arch/arm/include/asm/cache.h
@@ -0,0 +1,10 @@
+/*
+ *  arch/arm/include/asm/cache.h
+ */
+#ifndef __ASMARM_CACHE_H
+#define __ASMARM_CACHE_H
+
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
new file mode 100644
index 0000000..9073d9c
--- /dev/null
+++ b/arch/arm/include/asm/cacheflush.h
@@ -0,0 +1,537 @@
+/*
+ *  arch/arm/include/asm/cacheflush.h
+ *
+ *  Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_CACHEFLUSH_H
+#define _ASMARM_CACHEFLUSH_H
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/glue.h>
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
+
+/*
+ *	Cache Model
+ *	===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_CACHE_V3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xscale
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xsc3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_FEROCEON)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V6)
+//# ifdef _CACHE
+#  define MULTI_CACHE 1
+//# else
+//#  define _CACHE v6
+//# endif
+#endif
+
+#if defined(CONFIG_CPU_V7)
+//# ifdef _CACHE
+#  define MULTI_CACHE 1
+//# else
+//#  define _CACHE v7
+//# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintenance model
+#endif
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+/*
+ *	MM Cache Management
+ *	===================
+ *
+ *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
+ *	implement these methods.
+ *
+ *	Start addresses are inclusive and end addresses are exclusive;
+ *	start addresses should be rounded down, end addresses up.
+ *
+ *	See Documentation/cachetlb.txt for more information.
+ *	Please note that the implementation of these, and the required
+ *	effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ *	flush_cache_kern_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
+ *	flush_cache_user_mm(mm)
+ *
+ *		Clean and invalidate all user space cache entries
+ *		before a change of page tables.
+ *
+ *	flush_cache_user_range(start, end, flags)
+ *
+ *		Clean and invalidate a range of cache entries in the
+ *		specified address space before a change of page tables.
+ *		- start - user start address (inclusive, page aligned)
+ *		- end   - user end address   (exclusive, page aligned)
+ *		- flags - vma->vm_flags field
+ *
+ *	coherent_kern_range(start, end)
+ *
+ *		Ensure coherency between the Icache and the Dcache in the
+ *		region described by start, end.  If you have non-snooping
+ *		Harvard caches, you need to implement this function.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	DMA Cache Coherency
+ *	===================
+ *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_flush_range(start, end)
+ *
+ *		Clean and invalidate the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ */
+
+struct cpu_cache_fns {
+	void (*flush_kern_all)(void);
+	void (*flush_user_all)(void);
+	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
+
+	void (*coherent_kern_range)(unsigned long, unsigned long);
+	void (*coherent_user_range)(unsigned long, unsigned long);
+	void (*flush_kern_dcache_page)(void *);
+
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_flush_range)(const void *, const void *);
+};
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+};
+
+/*
+ * Select the calling method
+ */
+#ifdef MULTI_CACHE
+
+extern struct cpu_cache_fns cpu_cache;
+
+#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
+#define __cpuc_flush_user_all		cpu_cache.flush_user_all
+#define __cpuc_flush_user_range		cpu_cache.flush_user_range
+#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
+#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
+#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_flush_range		cpu_cache.dma_flush_range
+
+#else
+
+#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
+#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_page(void *);
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
+
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
+extern void dmac_flush_range(const void *, const void *);
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages.  Since the
+ * direct-mappings of these pages may contain cached data, we need
+ * to do a full cache flush to ensure that writebacks don't corrupt
+ * data placed into these pages via the new mappings.
+ */
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ */
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do {							\
+		memcpy(dst, src, len);				\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	do {							\
+		memcpy(dst, src, len);				\
+	} while (0)
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_cache_all()		__cpuc_flush_kern_all()
+#ifndef CONFIG_CPU_CACHE_VIPT
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+		__cpuc_flush_user_all();
+}
+
+static inline void
+flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+					vma->vm_flags);
+}
+
+static inline void
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		unsigned long addr = user_addr & PAGE_MASK;
+		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+	}
+}
+
+static inline void
+flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		unsigned long addr = (unsigned long)kaddr;
+		__cpuc_coherent_kern_range(addr, addr + len);
+	}
+}
+#else
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/*
+ * flush_cache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the ARM private sys_cacheflush system call.
+ */
+#define flush_cache_user_range(vma,start,end) \
+	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
+/*
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
+ */
+#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
+
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
+#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
+
+/*
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space.  This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
+ */
+extern void flush_dcache_page(struct page *);
+
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
+static inline void __flush_icache_all(void)
+{
+	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
+	    :
+	    : "r" (0));
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	extern void __flush_anon_page(struct vm_area_struct *vma,
+				struct page *, unsigned long);
+	if (PageAnon(page))
+		__flush_anon_page(vma, page, vmaddr);
+}
+
+#define flush_dcache_mmap_lock(mapping) \
+	spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+	spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_user_range(vma,page,addr,len) \
+	flush_dcache_page(page)
+
+/*
+ * We don't appear to need to do anything here.  In fact, if we did, we'd
+ * duplicate cache flushing elsewhere performed by flush_dcache_page().
+ */
+#define flush_icache_page(vma,page)	do { } while (0)
+
+static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
+	unsigned offset, size_t size)
+{
+	const void *start = (void __force *)virt + offset;
+	dmac_inv_range(start, start + size);
+}
+
+#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
+#define __cacheid_type_v7(val)			((val & (7 << 29)) == (4 << 29))
+
+#define __cacheid_vivt_prev7(val)		((val & (15 << 25)) != (14 << 25))
+#define __cacheid_vipt_prev7(val)		((val & (15 << 25)) == (14 << 25))
+#define __cacheid_vipt_nonaliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
+#define __cacheid_vipt_aliasing_prev7(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
+
+#define __cacheid_vivt(val)			(__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
+#define __cacheid_vipt(val)			(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
+#define __cacheid_vipt_nonaliasing(val)		(__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
+#define __cacheid_vipt_aliasing(val)		(__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
+#define __cacheid_vivt_asid_tagged_instr(val)	(__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
+
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+/*
+ * VIVT caches only
+ */
+#define cache_is_vivt()			1
+#define cache_is_vipt()			0
+#define cache_is_vipt_nonaliasing()	0
+#define cache_is_vipt_aliasing()	0
+#define icache_is_vivt_asid_tagged()	0
+
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+/*
+ * VIPT caches only
+ */
+#define cache_is_vivt()			0
+#define cache_is_vipt()			1
+#define cache_is_vipt_nonaliasing()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_vipt_nonaliasing(__val);			\
+	})
+
+#define cache_is_vipt_aliasing()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_vipt_aliasing(__val);				\
+	})
+
+#define icache_is_vivt_asid_tagged()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_vivt_asid_tagged_instr(__val);		\
+	})
+
+#else
+/*
+ * VIVT or VIPT caches.  Note that this is unreliable since ARM926
+ * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
+ * There's no way to tell from the CacheType register what type (!)
+ * the cache is.
+ */
+#define cache_is_vivt()							\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		(!__cacheid_present(__val)) || __cacheid_vivt(__val);	\
+	})
+
+#define cache_is_vipt()							\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) && __cacheid_vipt(__val);	\
+	})
+
+#define cache_is_vipt_nonaliasing()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) &&				\
+		 __cacheid_vipt_nonaliasing(__val);			\
+	})
+
+#define cache_is_vipt_aliasing()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) &&				\
+		 __cacheid_vipt_aliasing(__val);			\
+	})
+
+#define icache_is_vivt_asid_tagged()					\
+	({								\
+		unsigned int __val = read_cpuid(CPUID_CACHETYPE);	\
+		__cacheid_present(__val) &&				\
+		 __cacheid_vivt_asid_tagged_instr(__val);		\
+	})
+
+#endif
+
+#endif
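
A hedged sketch of the most common client of the coherency hooks above: once the
kernel has written instructions into memory (a module loader or code patcher,
say), flush_icache_range() makes them visible to the instruction stream.
publish_code() and its arguments are illustrative only.

/* Illustrative sketch only: make freshly written code executable. */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void publish_code(void *code, size_t len)
{
	unsigned long start = (unsigned long)code;

	/* clean the D-cache and invalidate the I-cache over the range */
	flush_icache_range(start, start + len);
}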
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
new file mode 100644
index 0000000..6dcc164
--- /dev/null
+++ b/arch/arm/include/asm/checksum.h
@@ -0,0 +1,139 @@
+/*
+ *  arch/arm/include/asm/checksum.h
+ *
+ * IP checksum routines
+ *
+ * Copyright (C) Original authors of ../asm-i386/checksum.h
+ * Copyright (C) 1996-1999 Russell King
+ */
+#ifndef __ASM_ARM_CHECKSUM_H
+#define __ASM_ARM_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+/*
+ * 	Fold a partial checksum without adding pseudo headers
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+	__asm__(
+	"add	%0, %1, %1, ror #16	@ csum_fold"
+	: "=r" (sum)
+	: "r" (sum)
+	: "cc");
+	return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned int tmp1;
+	__wsum sum;
+
+	__asm__ __volatile__(
+	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
+	ldr	%3, [%1], #4					\n\
+	sub	%2, %2, #5					\n\
+	adds	%0, %0, %3					\n\
+	ldr	%3, [%1], #4					\n\
+	adcs	%0, %0, %3					\n\
+	ldr	%3, [%1], #4					\n\
+1:	adcs	%0, %0, %3					\n\
+	ldr	%3, [%1], #4					\n\
+	tst	%2, #15			@ do this carefully	\n\
+	subne	%2, %2, #1		@ without destroying	\n\
+	bne	1b			@ the carry flag	\n\
+	adcs	%0, %0, %3					\n\
+	adc	%0, %0, #0"
+	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
+	: "1" (iph), "2" (ihl)
+	: "cc", "memory");
+	return csum_fold(sum);
+}
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
+{
+	__asm__(
+	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
+	adcs	%0, %0, %3					\n"
+#ifdef __ARMEB__
+	"adcs	%0, %0, %4					\n"
+#else
+	"adcs	%0, %0, %4, lsl #8				\n"
+#endif
+	"adcs	%0, %0, %5					\n\
+	adc	%0, %0, #0"
+	: "=&r"(sum)
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
+	: "cc");
+	return sum;
+}
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16
+ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+extern __wsum
+__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
+		__be32 proto, __wsum sum);
+
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
+		unsigned short proto, __wsum sum)
+{
+	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
+					   htonl(proto), sum));
+}
+#endif
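
To show how the routines above combine, here is a hedged sketch of checksumming a
UDP datagram the way the networking code does: csum_partial() over the header plus
payload, then csum_tcpudp_magic() folds in the pseudo-header.  The function name
and its parameters are assumptions made for the example.

/* Illustrative sketch only: UDP checksum built from the routines above. */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <asm/checksum.h>

static __sum16 example_udp_csum(const struct iphdr *iph,
				const struct udphdr *uh, int udplen)
{
	/* 32-bit partial sum over the UDP header and payload */
	__wsum csum = csum_partial(uh, udplen, 0);

	/* add the pseudo-header, then fold and complement to 16 bits */
	return csum_tcpudp_magic(iph->saddr, iph->daddr, udplen,
				 IPPROTO_UDP, csum);
}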
diff --git a/arch/arm/include/asm/cnt32_to_63.h b/arch/arm/include/asm/cnt32_to_63.h
new file mode 100644
index 0000000..480c873
--- /dev/null
+++ b/arch/arm/include/asm/cnt32_to_63.h
@@ -0,0 +1,78 @@
+/*
+ *  arch/arm/include/asm/cnt32_to_63.h -- extend a 32-bit counter to 63 bits
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	December 3, 2006
+ *  Copyright:	MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __INCLUDE_CNT32_TO_63_H__
+#define __INCLUDE_CNT32_TO_63_H__
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+/*
+ * Prototype: u64 cnt32_to_63(u32 cnt)
+ * Many hardware clock counters are only 32 bits wide and therefore have
+ * a relatively short period making wrap-arounds rather frequent.  This
+ * is a problem when implementing sched_clock() for example, where a 64-bit
+ * non-wrapping monotonic value is expected to be returned.
+ *
+ * To overcome that limitation, let's extend a 32-bit counter to 63 bits
+ * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
+ * by the hardware while bits 32 to 62 are stored in memory.  The top bit in
+ * memory is used to synchronize with the hardware clock half-period.  When
+ * the top bits of the two counters (hardware and in memory) differ, the
+ * memory is updated with a new value, incrementing it when the hardware
+ * counter wraps around.
+ *
+ * Because a word store in memory is atomic, the incremented value will
+ * always be in sync with the top bit, indicating to any potential concurrent
+ * reader whether the value in memory is up to date with regards to the
+ * needed increment.  And any race in updating the value in memory is harmless
+ * as the same value would simply be stored more than once.
+ *
+ * The only restriction for the algorithm to work properly is that this
+ * code must be executed at least once per each half period of the 32-bit
+ * counter to properly update the state bit in memory. This is usually not a
+ * problem in practice, but if it is then a kernel timer could be scheduled
+ * to ensure this code is executed often enough.
+ *
+ * Note that the top bit (bit 63) in the returned value should be considered
+ * as garbage.  It is not cleared here because callers are likely to use a
+ * multiplier on the returned value which can get rid of the top bit
+ * implicitly by making the multiplier even, therefore saving on a runtime
+ * clear-bit instruction. Otherwise the caller must remember to clear the top
+ * bit explicitly.
+ */
+
+/* this is used only to give gcc a clue about good code generation */
+typedef union {
+	struct {
+#if defined(__LITTLE_ENDIAN)
+		u32 lo, hi;
+#elif defined(__BIG_ENDIAN)
+		u32 hi, lo;
+#endif
+	};
+	u64 val;
+} cnt32_to_63_t;
+
+#define cnt32_to_63(cnt_lo) \
+({ \
+	static volatile u32 __m_cnt_hi = 0; \
+	cnt32_to_63_t __x; \
+	__x.hi = __m_cnt_hi; \
+	__x.lo = (cnt_lo); \
+ 	if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
+		__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
+	__x.val; \
+})
+
+#endif
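
The sched_clock() case mentioned in the comment is the canonical user; a minimal
sketch follows, assuming a hypothetical free-running 2MHz 32-bit counter read via
read_timer_count() (both the function and the rate are assumptions, not part of
this patch).

/* Illustrative sketch only: a 63-bit sched_clock() from a 32-bit timer. */
#include <asm/cnt32_to_63.h>

extern u32 read_timer_count(void);	/* hypothetical 32-bit counter read */

unsigned long long sched_clock(void)
{
	u64 v = cnt32_to_63(read_timer_count());

	/*
	 * 2MHz -> 500ns per tick.  An even multiplier also clears the
	 * undefined top bit for free, as the comment above recommends.
	 */
	return v * 500;
}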
diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h
new file mode 100644
index 0000000..e2b5b0b
--- /dev/null
+++ b/arch/arm/include/asm/cpu-multi32.h
@@ -0,0 +1,69 @@
+/*
+ *  arch/arm/include/asm/cpu-multi32.h
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <asm/page.h>
+
+struct mm_struct;
+
+/*
+ * Don't change this structure - ASM code
+ * relies on it.
+ */
+extern struct processor {
+	/* MISC
+	 * get data abort address/flags
+	 */
+	void (*_data_abort)(unsigned long pc);
+	/*
+	 * Retrieve prefetch fault address
+	 */
+	unsigned long (*_prefetch_abort)(unsigned long lr);
+	/*
+	 * Set up any processor specifics
+	 */
+	void (*_proc_init)(void);
+	/*
+	 * Disable any processor specifics
+	 */
+	void (*_proc_fin)(void);
+	/*
+	 * Special stuff for a reset
+	 */
+	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	/*
+	 * Idle the processor
+	 */
+	int (*_do_idle)(void);
+	/*
+	 * Processor architecture specific
+	 */
+	/*
+	 * clean a virtual address range from the
+	 * D-cache without flushing the cache.
+	 */
+	void (*dcache_clean_area)(void *addr, int size);
+
+	/*
+	 * Set the page table
+	 */
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	/*
+	 * Set a possibly extended PTE.  Non-extended PTEs should
+	 * ignore 'ext'.
+	 */
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+} processor;
+
+#define cpu_proc_init()			processor._proc_init()
+#define cpu_proc_fin()			processor._proc_fin()
+#define cpu_reset(addr)			processor.reset(addr)
+#define cpu_do_idle()			processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
+#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h
new file mode 100644
index 0000000..f073a6d
--- /dev/null
+++ b/arch/arm/include/asm/cpu-single.h
@@ -0,0 +1,44 @@
+/*
+ *  arch/arm/include/asm/cpu-single.h
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/*
+ * Single CPU
+ */
+#ifdef __STDC__
+#define __catify_fn(name,x)	name##x
+#else
+#define __catify_fn(name,x)	name/**/x
+#endif
+#define __cpu_fn(name,x)	__catify_fn(name,x)
+
+/*
+ * If we are supporting multiple CPUs, then we must use a table of
+ * function pointers for this lot.  Otherwise, we can optimise the
+ * table away.
+ */
+#define cpu_proc_init			__cpu_fn(CPU_NAME,_proc_init)
+#define cpu_proc_fin			__cpu_fn(CPU_NAME,_proc_fin)
+#define cpu_reset			__cpu_fn(CPU_NAME,_reset)
+#define cpu_do_idle			__cpu_fn(CPU_NAME,_do_idle)
+#define cpu_dcache_clean_area		__cpu_fn(CPU_NAME,_dcache_clean_area)
+#define cpu_do_switch_mm		__cpu_fn(CPU_NAME,_switch_mm)
+#define cpu_set_pte_ext			__cpu_fn(CPU_NAME,_set_pte_ext)
+
+#include <asm/page.h>
+
+struct mm_struct;
+
+/* declare all the functions as extern */
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
new file mode 100644
index 0000000..634b2d7
--- /dev/null
+++ b/arch/arm/include/asm/cpu.h
@@ -0,0 +1,25 @@
+/*
+ *  arch/arm/include/asm/cpu.h
+ *
+ *  Copyright (C) 2004-2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_CPU_H
+#define __ASM_ARM_CPU_H
+
+#include <linux/percpu.h>
+
+struct cpuinfo_arm {
+	struct cpu	cpu;
+#ifdef CONFIG_SMP
+	struct task_struct *idle;
+	unsigned int	loops_per_jiffy;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_arm, cpu_data);
+
+#endif
diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h
new file mode 100644
index 0000000..3a8002a
--- /dev/null
+++ b/arch/arm/include/asm/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __ARM_CPUTIME_H
+#define __ARM_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __ARM_CPUTIME_H */
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
new file mode 100644
index 0000000..75d21e2
--- /dev/null
+++ b/arch/arm/include/asm/current.h
@@ -0,0 +1,15 @@
+#ifndef _ASMARM_CURRENT_H
+#define _ASMARM_CURRENT_H
+
+#include <linux/thread_info.h>
+
+static inline struct task_struct *get_current(void) __attribute_const__;
+
+static inline struct task_struct *get_current(void)
+{
+	return current_thread_info()->task;
+}
+
+#define current (get_current())
+
+#endif /* _ASMARM_CURRENT_H */
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
new file mode 100644
index 0000000..b2deda1
--- /dev/null
+++ b/arch/arm/include/asm/delay.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 1995-2004 Russell King
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+#ifndef __ASM_ARM_DELAY_H
+#define __ASM_ARM_DELAY_H
+
+#include <asm/param.h>	/* HZ */
+
+extern void __delay(int loops);
+
+/*
+ * This function intentionally does not exist; if you see references to
+ * it, it means that you're calling udelay() with an out of range value.
+ *
+ * With currently imposed limits, this means that we support a max delay
+ * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ */
+extern void __bad_udelay(void);
+
+/*
+ * division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec).  Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant)
+ */
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long);
+
+#define MAX_UDELAY_MS 2
+
+#define udelay(n)							\
+	(__builtin_constant_p(n) ?					\
+	  ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() :		\
+			__const_udelay((n) * ((2199023U*HZ)>>11))) :	\
+	  __udelay(n))
+
+#endif /* __ASM_ARM_DELAY_H */
+
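
For illustration, udelay() picks its implementation at compile time; the two
hypothetical helpers below show the constant and non-constant paths.

/* Illustrative sketch only: the two udelay() expansion paths. */
#include <asm/delay.h>

static void example_fixed_pause(void)
{
	udelay(10);		/* constant: expands to __const_udelay() */
}

static void example_variable_pause(unsigned int usecs)
{
	udelay(usecs);		/* non-constant: calls __udelay() */
}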
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
new file mode 100644
index 0000000..c61642b
--- /dev/null
+++ b/arch/arm/include/asm/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef ASMARM_DEVICE_H
+#define ASMARM_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_DMABOUNCE
+	struct dmabounce_device_info *dmabounce;
+#endif
+};
+
+#endif
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
new file mode 100644
index 0000000..5001390
--- /dev/null
+++ b/arch/arm/include/asm/div64.h
@@ -0,0 +1,227 @@
+#ifndef __ASM_ARM_DIV64
+#define __ASM_ARM_DIV64
+
+#include <asm/system.h>
+#include <linux/types.h>
+
+/*
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * 	uint32_t remainder = *n % base;
+ * 	*n = *n / base;
+ * 	return remainder;
+ * }
+ *
+ * In other words, a 64-bit dividend with a 32-bit divisor producing
+ * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
+ * we call a special __do_div64 helper with completely non-standard
+ * calling convention for arguments and results (beware).
+ */
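
A brief usage note on the semantics just described: do_div() divides its 64-bit
argument in place and hands back the 32-bit remainder.  The helper below is an
illustrative assumption, not part of this patch.

/* Illustrative sketch only: convert nanoseconds to microseconds in place. */
#include <linux/types.h>
#include <asm/div64.h>

static u64 example_ns_to_us(u64 ns, u32 *rem_ns)
{
	/* do_div() replaces 'ns' with the quotient and returns the remainder */
	*rem_ns = do_div(ns, 1000);
	return ns;
}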
+
+#ifdef __ARMEB__
+#define __xh "r0"
+#define __xl "r1"
+#else
+#define __xl "r0"
+#define __xh "r1"
+#endif
+
+#define __do_div_asm(n, base)					\
+({								\
+	register unsigned int __base      asm("r4") = base;	\
+	register unsigned long long __n   asm("r0") = n;	\
+	register unsigned long long __res asm("r2");		\
+	register unsigned int __rem       asm(__xh);		\
+	asm(	__asmeq("%0", __xh)				\
+		__asmeq("%1", "r2")				\
+		__asmeq("%2", "r0")				\
+		__asmeq("%3", "r4")				\
+		"bl	__do_div64"				\
+		: "=r" (__rem), "=r" (__res)			\
+		: "r" (__n), "r" (__base)			\
+		: "ip", "lr", "cc");				\
+	n = __res;						\
+	__rem;							\
+})
+
+#if __GNUC__ < 4
+
+/*
+ * gcc versions earlier than 4.0 are simply too problematic for the
+ * optimized implementation below.  First there is gcc PR 15089, which
+ * tends to trigger on more complex constructs: spurious .global __udivsi3
+ * references are inserted even if none of those symbols are referenced
+ * in the generated code.  And those gcc versions are not able to do
+ * constant propagation on long long values anyway.
+ */
+#define do_div(n, base) __do_div_asm(n, base)
+
+#elif __GNUC__ >= 4
+
+#include <asm/bug.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications instead which is much faster. And yet only if compiling
+ * for ARMv4 or higher (we need umull/umlal) and if the gcc version is
+ * sufficiently recent to perform proper long long constant propagation.
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+#define do_div(n, base)							\
+({									\
+	unsigned int __r, __b = (base);					\
+	if (!__builtin_constant_p(__b) || __b == 0 ||			\
+	    (__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) {	\
+		/* non-constant divisor (or zero): slow path */		\
+		__r = __do_div_asm(n, __b);				\
+	} else if ((__b & (__b - 1)) == 0) {				\
+		/* Trivial: __b is constant and a power of 2 */		\
+		/* gcc does the right thing with this code.  */		\
+		__r = n;						\
+		__r &= (__b - 1);					\
+		n /= __b;						\
+	} else {							\
+		/* Multiply by inverse of __b: n/b = n*(p/b)/p       */	\
+		/* We rely on the fact that most of this code gets   */	\
+		/* optimized away at compile time due to constant    */	\
+		/* propagation and only a couple inline assembly     */	\
+		/* instructions should remain. Better avoid any      */	\
+		/* code construct that might prevent that.           */	\
+		unsigned long long __res, __x, __t, __m, __n = n;	\
+		unsigned int __c, __p, __z = 0;				\
+		/* preserve low part of n for remainder computation */	\
+		__r = __n;						\
+		/* determine number of bits to represent __b */		\
+		__p = 1 << __div64_fls(__b);				\
+		/* compute __m = ((__p << 64) + __b - 1) / __b */	\
+		__m = (~0ULL / __b) * __p;				\
+		__m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b;	\
+		/* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */	\
+		__x = ~0ULL / __b * __b - 1;				\
+		__res = (__m & 0xffffffff) * (__x & 0xffffffff);	\
+		__res >>= 32;						\
+		__res += (__m & 0xffffffff) * (__x >> 32);		\
+		__t = __res;						\
+		__res += (__x & 0xffffffff) * (__m >> 32);		\
+		__t = (__res < __t) ? (1ULL << 32) : 0;			\
+		__res = (__res >> 32) + __t;				\
+		__res += (__m >> 32) * (__x >> 32);			\
+		__res /= __p;						\
+		/* Now sanitize and optimize what we've got. */		\
+		if (~0ULL % (__b / (__b & -__b)) == 0) {		\
+			/* those cases can be simplified with: */	\
+			__n /= (__b & -__b);				\
+			__m = ~0ULL / (__b / (__b & -__b));		\
+			__p = 1;					\
+			__c = 1;					\
+		} else if (__res != __x / __b) {			\
+			/* We can't get away without a correction    */	\
+			/* to compensate for bit truncation errors.  */	\
+			/* To avoid it we'd need an additional bit   */	\
+			/* to represent __m which would overflow it. */	\
+			/* Instead we do m=p/b and n/b=(n*m+m)/p.    */	\
+			__c = 1;					\
+			/* Compute __m = (__p << 64) / __b */		\
+			__m = (~0ULL / __b) * __p;			\
+			__m += ((~0ULL % __b + 1) * __p) / __b;		\
+		} else {						\
+			/* Reduce __m/__p, and try to clear bit 31   */	\
+			/* of __m when possible otherwise that'll    */	\
+			/* need extra overflow handling later.       */	\
+			unsigned int __bits = -(__m & -__m);		\
+			__bits |= __m >> 32;				\
+			__bits = (~__bits) << 1;			\
+			/* If __bits == 0 then setting bit 31 is     */	\
+			/* unavoidable.  Simply apply the maximum    */	\
+			/* possible reduction in that case.          */	\
+			/* Otherwise the MSB of __bits indicates the */	\
+			/* best reduction we should apply.           */	\
+			if (!__bits) {					\
+				__p /= (__m & -__m);			\
+				__m /= (__m & -__m);			\
+			} else {					\
+				__p >>= __div64_fls(__bits);		\
+				__m >>= __div64_fls(__bits);		\
+			}						\
+			/* No correction needed. */			\
+			__c = 0;					\
+		}							\
+		/* Now we have a combination of 2 conditions:        */	\
+		/* 1) whether or not we need a correction (__c), and */	\
+		/* 2) whether or not there might be an overflow in   */	\
+		/*    the cross product (__m & ((1<<63) | (1<<31)))  */	\
+		/* Select the best insn combination to perform the   */	\
+		/* actual __m * __n / (__p << 64) operation.         */	\
+		if (!__c) {						\
+			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\
+				"mov	%Q0, #0"			\
+				: "=&r" (__res)				\
+				: "r" (__m), "r" (__n)			\
+				: "cc" );				\
+		} else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {	\
+			__res = __m;					\
+			asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"		\
+				"mov	%Q0, #0"			\
+				: "+r" (__res)				\
+				: "r" (__m), "r" (__n)			\
+				: "cc" );				\
+		} else {						\
+			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\
+				"cmn	%Q0, %Q1\n\t"			\
+				"adcs	%R0, %R0, %R1\n\t"		\
+				"adc	%Q0, %3, #0"			\
+				: "=&r" (__res)				\
+				: "r" (__m), "r" (__n), "r" (__z)	\
+				: "cc" );				\
+		}							\
+		if (!(__m & ((1ULL << 63) | (1ULL << 31)))) {		\
+			asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"		\
+				"umlal	%R0, %Q0, %Q1, %R2\n\t"		\
+				"mov	%R0, #0\n\t"			\
+				"umlal	%Q0, %R0, %R1, %R2"		\
+				: "+r" (__res)				\
+				: "r" (__m), "r" (__n)			\
+				: "cc" );				\
+		} else {						\
+			asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"		\
+				"umlal	%R0, %1, %Q2, %R3\n\t"		\
+				"mov	%R0, #0\n\t"			\
+				"adds	%Q0, %1, %Q0\n\t"		\
+				"adc	%R0, %R0, #0\n\t"		\
+				"umlal	%Q0, %R0, %R2, %R3"		\
+				: "+r" (__res), "+r" (__z)		\
+				: "r" (__m), "r" (__n)			\
+				: "cc" );				\
+		}							\
+		__res /= __p;						\
+		/* The remainder can be computed with 32-bit regs    */	\
+		/* only, and gcc is good at that.                    */	\
+		{							\
+			unsigned int __res0 = __res;			\
+			unsigned int __b0 = __b;			\
+			__r -= __res0 * __b0;				\
+		}							\
+		/* BUG_ON(__r >= __b || __res * __b + __r != n); */	\
+		n = __res;						\
+	}								\
+	__r;								\
+})
+
+/* our own fls implementation to make sure constant propagation is fine */
+#define __div64_fls(bits)						\
+({									\
+	unsigned int __left = (bits), __nr = 0;				\
+	if (__left & 0xffff0000) __nr += 16, __left >>= 16;		\
+	if (__left & 0x0000ff00) __nr +=  8, __left >>=  8;		\
+	if (__left & 0x000000f0) __nr +=  4, __left >>=  4;		\
+	if (__left & 0x0000000c) __nr +=  2, __left >>=  2;		\
+	if (__left & 0x00000002) __nr +=  1;				\
+	__nr;								\
+})
+
+#endif
+
+#endif
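
To make the do_div() calling convention above concrete, a small sketch
(illustration only, not part of this patch; the helper is hypothetical):

#include <linux/types.h>
#include <asm/div64.h>

/* hypothetical: split a nanosecond count into whole seconds and leftover ns */
static u64 example_ns_to_secs(u64 ns, u32 *rem_ns)
{
	u64 secs = ns;

	/* do_div() divides 'secs' in place and returns the 32-bit remainder */
	*rem_ns = do_div(secs, 1000000000);
	return secs;
}

Because the divisor here is constant, gcc 4 and later take the
multiply-by-inverse path above; a variable divisor falls back to
__do_div_asm().
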
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
new file mode 100644
index 0000000..f41335b
--- /dev/null
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -0,0 +1,456 @@
+#ifndef ASMARM_DMA_MAPPING_H
+#define ASMARM_DMA_MAPPING_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h> /* need struct page */
+
+#include <linux/scatterlist.h>
+
+/*
+ * DMA-consistent mapping functions.  These allocate/free a region of
+ * uncached, unwrite-buffered mapped memory space for use with DMA
+ * devices.  This is the "generic" version.  The PCI specific version
+ * is in pci.h
+ *
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ *
+ * FIXME: This should really be a platform specific issue - we should
+ * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
+ */
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return dev->dma_mask && *dev->dma_mask != 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	return 32;
+}
+
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
+{
+	return !!arch_is_coherent();
+}
+
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == ~0;
+}
+
+/*
+ * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
+ * function so drivers using this API are highlighted with build warnings.
+ */
+static inline void *
+dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline void
+dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
+		     dma_addr_t handle)
+{
+}
+
+/**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA.  This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_coherent().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * during and after this call executing are illegal.
+ */
+extern void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle);
+
+/**
+ * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @size: size of memory originally requested in dma_alloc_coherent
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size);
+
+
+/**
+ * dma_alloc_writecombine - allocate writecombining memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, buffered memory for a device for
+ * performing DMA.  This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+
+#define dma_free_writecombine(dev,size,cpu_addr,handle) \
+	dma_free_coherent(dev,size,cpu_addr,handle)
+
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t handle, size_t size);
+
+
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_single() or
+ * dma_sync_single_for_cpu().
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+	       enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint(cpu_addr, size, dir);
+
+	return virt_to_dma(dev, (unsigned long)cpu_addr);
+}
+#else
+extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
+#endif
+
+/**
+ * dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page() or
+ * dma_sync_single_for_cpu().
+ */
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size,
+	     enum dma_data_direction dir)
+{
+	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+}
+
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
+		 enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+#else
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+#endif
+
+/**
+ * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+	       enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, handle, size, (int)dir);
+}
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * above dma_map_single interface.  Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length.  They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements.
+ *       (for example via virtual mapping capabilities)
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for dma_map_single are
+ * the same here.
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; i++, sg++) {
+		char *virt;
+
+		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
+		virt = sg_virt(sg);
+
+		if (!arch_is_coherent())
+			dma_cache_maint(virt, sg->length, dir);
+	}
+
+	return nents;
+}
+#else
+extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ * Again, CPU read rules concerning calls here are the same as for
+ * dma_unmap_single() above.
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+	     enum dma_data_direction dir)
+{
+
+	/* nothing to do */
+}
+#else
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+#endif
+
+
+/**
+ * dma_sync_single_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a single streaming mode DMA
+ * translation after a transfer.
+ *
+ * If you perform a dma_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to tear down the PCI dma
+ * mapping, you must call this function before doing so.  At the
+ * next point you give the PCI dma address back to the card, you
+ * must first perform a dma_sync_single_for_device(), and then the
+ * device again owns the buffer.
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+			enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+			   enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+}
+#else
+extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+#endif
+
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as dma_sync_single_for_* but for a scatter-gather list,
+ * same rules and usage.
+ */
+#ifndef CONFIG_DMABOUNCE
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+		    enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; i++, sg++) {
+		char *virt = sg_virt(sg);
+		if (!arch_is_coherent())
+			dma_cache_maint(virt, sg->length, dir);
+	}
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+		       enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; i++, sg++) {
+		char *virt = sg_virt(sg);
+		if (!arch_is_coherent())
+			dma_cache_maint(virt, sg->length, dir);
+	}
+}
+#else
+extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
+#endif
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subsystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requiring DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+#endif /* CONFIG_DMABOUNCE */
+
+#endif /* __KERNEL__ */
+#endif
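
A minimal sketch (illustration only, not part of this patch) of the
streaming DMA calls above as a driver might use them; the buffer handling
and completion model are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* hypothetical transmit path: hand a kmalloc'ed buffer to a device */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device with 'handle' and start the transfer ... */

	/* once the device signals completion, return ownership to the CPU */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
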
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
new file mode 100644
index 0000000..9f2c530
--- /dev/null
+++ b/arch/arm/include/asm/dma.h
@@ -0,0 +1,143 @@
+#ifndef __ASM_ARM_DMA_H
+#define __ASM_ARM_DMA_H
+
+typedef unsigned int dmach_t;
+
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/scatterlist.h>
+#include <asm/arch/dma.h>
+
+/*
+ * This is the maximum virtual address which can be DMA'd from.
+ */
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS	0xffffffff
+#endif
+
+/*
+ * DMA modes
+ */
+typedef unsigned int dmamode_t;
+
+#define DMA_MODE_MASK	3
+
+#define DMA_MODE_READ	 0
+#define DMA_MODE_WRITE	 1
+#define DMA_MODE_CASCADE 2
+#define DMA_AUTOINIT	 4
+
+extern spinlock_t  dma_spin_lock;
+
+static inline unsigned long claim_dma_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dma_spin_lock, flags);
+	return flags;
+}
+
+static inline void release_dma_lock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ */
+#define clear_dma_ff(channel)
+
+/* Set only the page register bits of the transfer address.
+ *
+ * NOTE: This is an architecture specific function, and should
+ *       be hidden from the drivers
+ */
+extern void set_dma_page(dmach_t channel, char pagenr);
+
+/* Request a DMA channel
+ *
+ * Some architectures may need to allocate an interrupt
+ */
+extern int  request_dma(dmach_t channel, const char * device_id);
+
+/* Free a DMA channel
+ *
+ * Some architectures may need to free an interrupt
+ */
+extern void free_dma(dmach_t channel);
+
+/* Enable DMA for this channel
+ *
+ * On some architectures, this may have other side effects like
+ * enabling an interrupt and setting the DMA registers.
+ */
+extern void enable_dma(dmach_t channel);
+
+/* Disable DMA for this channel
+ *
+ * On some architectures, this may have other side effects like
+ * disabling an interrupt or whatever.
+ */
+extern void disable_dma(dmach_t channel);
+
+/* Test whether the specified channel has an active DMA transfer
+ */
+extern int dma_channel_active(dmach_t channel);
+
+/* Set the DMA scatter gather list for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA address immediately, but defer it to the enable_dma().
+ */
+extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
+
+/* Set the DMA address for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA address immediately, but defer it to the enable_dma().
+ */
+extern void __set_dma_addr(dmach_t channel, void *addr);
+#define set_dma_addr(channel, addr)				\
+	__set_dma_addr(channel, bus_to_virt(addr))
+
+/* Set the DMA byte count for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA count immediately, but defer it to the enable_dma().
+ */
+extern void set_dma_count(dmach_t channel, unsigned long count);
+
+/* Set the transfer direction for this channel
+ *
+ * This should not be called if a DMA channel is enabled,
+ * especially since some DMA architectures don't update the
+ * DMA transfer direction immediately, but defer it to the
+ * enable_dma().
+ */
+extern void set_dma_mode(dmach_t channel, dmamode_t mode);
+
+/* Set the transfer speed for this channel
+ */
+extern void set_dma_speed(dmach_t channel, int cycle_ns);
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ */
+extern int  get_dma_residue(dmach_t channel);
+
+#ifndef NO_DMA
+#define NO_DMA	255
+#endif
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy    (0)
+#endif
+
+#endif /* __ASM_ARM_DMA_H */
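
A sketch of the intended call sequence for the ISA-style interface above
(illustration only, not part of this patch; the channel, buffer and
completion handling are hypothetical):

#include <linux/errno.h>
#include <asm/dma.h>

/* hypothetical single-shot read on a platform DMA channel */
static int example_dma_read(dmach_t channel, void *buf, unsigned long len)
{
	unsigned long flags;
	int ret;

	ret = request_dma(channel, "example");
	if (ret)
		return ret;

	flags = claim_dma_lock();
	disable_dma(channel);
	set_dma_mode(channel, DMA_MODE_READ);
	__set_dma_addr(channel, buf);
	set_dma_count(channel, len);
	enable_dma(channel);
	release_dma_lock(flags);

	/* ... wait for the device to signal completion, then ... */
	free_dma(channel);
	return 0;
}
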
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
new file mode 100644
index 0000000..cc7ef40
--- /dev/null
+++ b/arch/arm/include/asm/domain.h
@@ -0,0 +1,78 @@
+/*
+ *  arch/arm/include/asm/domain.h
+ *
+ *  Copyright (C) 1999 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PROC_DOMAIN_H
+#define __ASM_PROC_DOMAIN_H
+
+/*
+ * Domain numbers
+ *
+ *  DOMAIN_IO     - domain 2 includes all IO only
+ *  DOMAIN_USER   - domain 1 includes all user memory only
+ *  DOMAIN_KERNEL - domain 0 includes all kernel memory only
+ *
+ * The domain numbering depends on whether we support 36-bit physical
+ * addresses for I/O or not.  Addresses above the 32-bit boundary can
+ * only be mapped using supersections and supersections can only
+ * be set for domain 0.  We could just default to DOMAIN_IO as zero,
+ * but there may be systems with supersection support and no 36-bit
+ * addressing.  In such cases, we want to map system memory with
+ * supersections to reduce TLB misses and footprint.
+ *
+ * 36-bit addressing and supersections are only available on
+ * CPUs based on ARMv6+ or the Intel XSC3 core.
+ */
+#ifndef CONFIG_IO_36
+#define DOMAIN_KERNEL	0
+#define DOMAIN_TABLE	0
+#define DOMAIN_USER	1
+#define DOMAIN_IO	2
+#else
+#define DOMAIN_KERNEL	2
+#define DOMAIN_TABLE	2
+#define DOMAIN_USER	1
+#define DOMAIN_IO	0
+#endif
+
+/*
+ * Domain types
+ */
+#define DOMAIN_NOACCESS	0
+#define DOMAIN_CLIENT	1
+#define DOMAIN_MANAGER	3
+
+#define domain_val(dom,type)	((type) << (2*(dom)))
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
+#define set_domain(x)					\
+	do {						\
+	__asm__ __volatile__(				\
+	"mcr	p15, 0, %0, c3, c0	@ set domain"	\
+	  : : "r" (x));					\
+	isb();						\
+	} while (0)
+
+#define modify_domain(dom,type)					\
+	do {							\
+	struct thread_info *thread = current_thread_info();	\
+	unsigned int domain = thread->cpu_domain;		\
+	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
+	thread->cpu_domain = domain | domain_val(dom, type);	\
+	set_domain(thread->cpu_domain);				\
+	} while (0)
+
+#else
+#define set_domain(x)		do { } while (0)
+#define modify_domain(dom,type)	do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_PROC_DOMAIN_H */
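
For illustration only (not part of this patch), how the helpers above are
typically paired; the surrounding access is hypothetical:

#include <asm/domain.h>

/*
 * Temporarily give the kernel domain manager permissions (bypassing
 * page-table access checks) around a privileged access, then restore
 * normal client checking.
 */
static void example_privileged_access(void)
{
	modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
	/* ... access that must ignore page protections ... */
	modify_domain(DOMAIN_KERNEL, DOMAIN_CLIENT);
}
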
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
new file mode 100644
index 0000000..29f2610
--- /dev/null
+++ b/arch/arm/include/asm/ecard.h
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/include/asm/ecard.h
+ *
+ * definitions for expansion cards
+ *
+ * This is a new system as from Linux 1.2.3
+ *
+ * Changelog:
+ *  11-12-1996	RMK	Further minor improvements
+ *  12-09-1997	RMK	Added interrupt enable/disable for card level
+ *
+ * Reference: Acorn's RISC OS 3 Programmer's Reference Manuals.
+ */
+
+#ifndef __ASM_ECARD_H
+#define __ASM_ECARD_H
+
+/*
+ * Currently understood cards (but not necessarily
+ * supported):
+ *                        Manufacturer  Product ID
+ */
+#define MANU_ACORN		0x0000
+#define PROD_ACORN_SCSI			0x0002
+#define PROD_ACORN_ETHER1		0x0003
+#define PROD_ACORN_MFM			0x000b
+
+#define MANU_ANT2		0x0011
+#define PROD_ANT_ETHER3			0x00a4
+
+#define MANU_ATOMWIDE		0x0017
+#define PROD_ATOMWIDE_3PSERIAL		0x0090
+
+#define MANU_IRLAM_INSTRUMENTS	0x001f
+#define MANU_IRLAM_INSTRUMENTS_ETHERN	0x5678
+
+#define MANU_OAK		0x0021
+#define PROD_OAK_SCSI			0x0058
+
+#define MANU_MORLEY		0x002b
+#define PROD_MORLEY_SCSI_UNCACHED	0x0067
+
+#define MANU_CUMANA		0x003a
+#define PROD_CUMANA_SCSI_2		0x003a
+#define PROD_CUMANA_SCSI_1		0x00a0
+
+#define MANU_ICS		0x003c
+#define PROD_ICS_IDE			0x00ae
+
+#define MANU_ICS2		0x003d
+#define PROD_ICS2_IDE			0x00ae
+
+#define MANU_SERPORT		0x003f
+#define PROD_SERPORT_DSPORT		0x00b9
+
+#define MANU_ARXE		0x0041
+#define PROD_ARXE_SCSI			0x00be
+
+#define MANU_I3			0x0046
+#define PROD_I3_ETHERLAN500		0x00d4
+#define PROD_I3_ETHERLAN600		0x00ec
+#define PROD_I3_ETHERLAN600A		0x011e
+
+#define MANU_ANT		0x0053
+#define PROD_ANT_ETHERM			0x00d8
+#define PROD_ANT_ETHERB			0x00e4
+
+#define MANU_ALSYSTEMS		0x005b
+#define PROD_ALSYS_SCSIATAPI		0x0107
+
+#define MANU_MCS		0x0063
+#define PROD_MCS_CONNECT32		0x0125
+
+#define MANU_EESOX		0x0064
+#define PROD_EESOX_SCSI2		0x008c
+
+#define MANU_YELLOWSTONE	0x0096
+#define PROD_YELLOWSTONE_RAPIDE32	0x0120
+
+#ifdef ECARD_C
+#define CONST
+#else
+#define CONST const
+#endif
+
+#define MAX_ECARDS	9
+
+struct ecard_id {			/* Card ID structure		*/
+	unsigned short	manufacturer;
+	unsigned short	product;
+	void		*data;
+};
+
+struct in_ecid {			/* Packed card ID information	*/
+	unsigned short	product;	/* Product code			*/
+	unsigned short	manufacturer;	/* Manufacturer code		*/
+	unsigned char	id:4;		/* Simple ID			*/
+	unsigned char	cd:1;		/* Chunk dir present		*/
+	unsigned char	is:1;		/* Interrupt status pointers	*/
+	unsigned char	w:2;		/* Width			*/
+	unsigned char	country;	/* Country			*/
+	unsigned char	irqmask;	/* IRQ mask			*/
+	unsigned char	fiqmask;	/* FIQ mask			*/
+	unsigned long	irqoff;		/* IRQ offset			*/
+	unsigned long	fiqoff;		/* FIQ offset			*/
+};
+
+typedef struct expansion_card ecard_t;
+typedef unsigned long *loader_t;
+
+typedef struct expansion_card_ops {	/* Card handler routines	*/
+	void (*irqenable)(ecard_t *ec, int irqnr);
+	void (*irqdisable)(ecard_t *ec, int irqnr);
+	int  (*irqpending)(ecard_t *ec);
+	void (*fiqenable)(ecard_t *ec, int fiqnr);
+	void (*fiqdisable)(ecard_t *ec, int fiqnr);
+	int  (*fiqpending)(ecard_t *ec);
+} expansioncard_ops_t;
+
+#define ECARD_NUM_RESOURCES	(6)
+
+#define ECARD_RES_IOCSLOW	(0)
+#define ECARD_RES_IOCMEDIUM	(1)
+#define ECARD_RES_IOCFAST	(2)
+#define ECARD_RES_IOCSYNC	(3)
+#define ECARD_RES_MEMC		(4)
+#define ECARD_RES_EASI		(5)
+
+#define ecard_resource_start(ec,nr)	((ec)->resource[nr].start)
+#define ecard_resource_end(ec,nr)	((ec)->resource[nr].end)
+#define ecard_resource_len(ec,nr)	((ec)->resource[nr].end - \
+					 (ec)->resource[nr].start + 1)
+#define ecard_resource_flags(ec,nr)	((ec)->resource[nr].flags)
+
+/*
+ * This contains all the info needed on an expansion card
+ */
+struct expansion_card {
+	struct expansion_card  *next;
+
+	struct device		dev;
+	struct resource		resource[ECARD_NUM_RESOURCES];
+
+	/* Public data */
+	void __iomem		*irqaddr;	/* address of IRQ register	*/
+	void __iomem		*fiqaddr;	/* address of FIQ register	*/
+	unsigned char		irqmask;	/* IRQ mask			*/
+	unsigned char		fiqmask;	/* FIQ mask			*/
+	unsigned char  		claimed;	/* Card claimed?		*/
+	unsigned char		easi;		/* EASI card			*/
+
+	void			*irq_data;	/* Data for use for IRQ by card	*/
+	void			*fiq_data;	/* Data for use for FIQ by card	*/
+	const expansioncard_ops_t *ops;		/* Enable/Disable Ops for card	*/
+
+	CONST unsigned int	slot_no;	/* Slot number			*/
+	CONST unsigned int	dma;		/* DMA number (for request_dma)	*/
+	CONST unsigned int	irq;		/* IRQ number (for request_irq)	*/
+	CONST unsigned int	fiq;		/* FIQ number (for request_irq)	*/
+	CONST struct in_ecid	cid;		/* Card Identification		*/
+
+	/* Private internal data */
+	const char		*card_desc;	/* Card description		*/
+	CONST unsigned int	podaddr;	/* Base Linux address for card	*/
+	CONST loader_t		loader;		/* loader program */
+	u64			dma_mask;
+};
+
+void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data);
+
+struct in_chunk_dir {
+	unsigned int start_offset;
+	union {
+		unsigned char string[256];
+		unsigned char data[1];
+	} d;
+};
+
+/*
+ * Read a chunk from an expansion card
+ * cd : where to put read data
+ * ec : expansion card info struct
+ * id : id number to find
+ * num: (n+1)'th id to find.
+ */
+extern int ecard_readchunk (struct in_chunk_dir *cd, struct expansion_card *ec, int id, int num);
+
+/*
+ * Request and release ecard resources
+ */
+extern int ecard_request_resources(struct expansion_card *ec);
+extern void ecard_release_resources(struct expansion_card *ec);
+
+void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
+			   unsigned long offset, unsigned long maxsize);
+#define ecardm_iounmap(__ec, __addr)	devm_iounmap(&(__ec)->dev, __addr)
+
+extern struct bus_type ecard_bus_type;
+
+#define ECARD_DEV(_d)	container_of((_d), struct expansion_card, dev)
+
+struct ecard_driver {
+	int			(*probe)(struct expansion_card *, const struct ecard_id *id);
+	void			(*remove)(struct expansion_card *);
+	void			(*shutdown)(struct expansion_card *);
+	const struct ecard_id	*id_table;
+	unsigned int		id;
+	struct device_driver	drv;
+};
+
+#define ECARD_DRV(_d)	container_of((_d), struct ecard_driver, drv)
+
+#define ecard_set_drvdata(ec,data)	dev_set_drvdata(&(ec)->dev, (data))
+#define ecard_get_drvdata(ec)		dev_get_drvdata(&(ec)->dev)
+
+int ecard_register_driver(struct ecard_driver *);
+void ecard_remove_driver(struct ecard_driver *);
+
+#endif
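
A skeleton of the driver registration interface above (illustration only,
not part of this patch); the driver name and ID table entries are
hypothetical:

#include <asm/ecard.h>

static const struct ecard_id example_ids[] = {
	{ MANU_ACORN, PROD_ACORN_SCSI },	/* hypothetical match */
	{ 0xffff, 0xffff }			/* terminator (assumed convention) */
};

static int example_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	int ret = ecard_request_resources(ec);
	if (ret)
		return ret;
	/* ... ecardm_iomap() an ECARD_RES_* region, ecard_set_drvdata() ... */
	return 0;
}

static void example_remove(struct expansion_card *ec)
{
	ecard_release_resources(ec);
}

static struct ecard_driver example_driver = {
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
	.drv		= {
		.name	= "example",
	},
};

The driver would then be registered from its module init path with
ecard_register_driver(&example_driver) and torn down again with
ecard_remove_driver().
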
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
new file mode 100644
index 0000000..4ca7516
--- /dev/null
+++ b/arch/arm/include/asm/elf.h
@@ -0,0 +1,116 @@
+#ifndef __ASMARM_ELF_H
+#define __ASMARM_ELF_H
+
+#include <asm/hwcap.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_freg_t[3];
+
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fp elf_fpregset_t;
+#endif
+
+#define EM_ARM	40
+#define EF_ARM_APCS26 0x08
+#define EF_ARM_SOFT_FLOAT 0x200
+#define EF_ARM_EABI_MASK 0xFF000000
+
+#define R_ARM_NONE	0
+#define R_ARM_PC24	1
+#define R_ARM_ABS32	2
+#define R_ARM_CALL	28
+#define R_ARM_JUMP24	29
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#ifdef __ARMEB__
+#define ELF_DATA	ELFDATA2MSB
+#else
+#define ELF_DATA	ELFDATA2LSB
+#endif
+#define ELF_ARCH	EM_ARM
+
+#ifndef __ASSEMBLY__
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ *
+ * For now we just provide a fairly general string that describes the
+ * processor family.  This could be made more specific later if someone
+ * implemented optimisations that require it.  26-bit CPUs give you
+ * "v1l" for ARM2 (no SWP) and "v2l" for anything else (ARM1 isn't
+ * supported).  32-bit CPUs give you "v3[lb]" for anything based on an
+ * ARM6 or ARM7 core and "armv4[lb]" for anything based on a StrongARM-1
+ * core.
+ */
+#define ELF_PLATFORM_SIZE 8
+#define ELF_PLATFORM	(elf_platform)
+
+extern char elf_platform[];
+#endif
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
+
+/*
+ * 32-bit code is always OK.  Some cpus can do 26-bit, some can't.
+ */
+#define ELF_PROC_OK(x)	(ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
+
+#define ELF_THUMB_OK(x) \
+	((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
+	 ((x)->e_entry & 3) == 0)
+
+#define ELF_26BIT_OK(x) \
+	((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
+	  ((x)->e_flags & EF_ARM_APCS26) == 0)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
+
+/* When the program starts, a1 contains a pointer to a function to be 
+   registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+   have no such handler.  */
+#define ELF_PLAT_INIT(_r, load_addr)	(_r)->ARM_r0 = 0
+
+/*
+ * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
+ * and CP1, we only enable access to the iWMMXt coprocessor if the
+ * binary is EABI or softfloat (and thus, guaranteed not to use
+ * FPA instructions.)
+ */
+#define SET_PERSONALITY(ex, ibcs2)					\
+	do {								\
+		if ((ex).e_flags & EF_ARM_APCS26) {			\
+			set_personality(PER_LINUX);			\
+		} else {						\
+			set_personality(PER_LINUX_32BIT);		\
+			if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
+				set_thread_flag(TIF_USING_IWMMXT);	\
+			else						\
+				clear_thread_flag(TIF_USING_IWMMXT);	\
+		}							\
+	} while (0)
+
+#endif
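
Restated as a standalone predicate (illustration only, not part of this
patch), the check that elf_check_arch() composes from the macros above:

#include <linux/elf.h>
#include <asm/elf.h>

/* hypothetical restatement of elf_check_arch() for a 32-bit ELF header */
static int example_loadable(Elf32_Ehdr *hdr)
{
	if (hdr->e_machine != EM_ARM)
		return 0;
	/* Thumb entry points need HWCAP_THUMB; ARM entries must be word aligned */
	if (!ELF_THUMB_OK(hdr))
		return 0;
	/* APCS-26 images need a CPU with 26-bit support */
	if (!ELF_26BIT_OK(hdr))
		return 0;
	return 1;
}
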
diff --git a/arch/arm/include/asm/emergency-restart.h b/arch/arm/include/asm/emergency-restart.h
new file mode 100644
index 0000000..108d8c4
--- /dev/null
+++ b/arch/arm/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/arm/include/asm/errno.h b/arch/arm/include/asm/errno.h
new file mode 100644
index 0000000..6e60f06
--- /dev/null
+++ b/arch/arm/include/asm/errno.h
@@ -0,0 +1,6 @@
+#ifndef _ARM_ERRNO_H
+#define _ARM_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif
diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h
new file mode 100644
index 0000000..d92e99c
--- /dev/null
+++ b/arch/arm/include/asm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/arm/include/asm/fcntl.h b/arch/arm/include/asm/fcntl.h
new file mode 100644
index 0000000..a80b660
--- /dev/null
+++ b/arch/arm/include/asm/fcntl.h
@@ -0,0 +1,11 @@
+#ifndef _ARM_FCNTL_H
+#define _ARM_FCNTL_H
+
+#define O_DIRECTORY	 040000	/* must be a directory */
+#define O_NOFOLLOW	0100000	/* don't follow links */
+#define O_DIRECT	0200000	/* direct disk access hint - currently ignored */
+#define O_LARGEFILE	0400000
+
+#include <asm-generic/fcntl.h>
+
+#endif
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
new file mode 100644
index 0000000..2242ce2
--- /dev/null
+++ b/arch/arm/include/asm/fiq.h
@@ -0,0 +1,37 @@
+/*
+ *  arch/arm/include/asm/fiq.h
+ *
+ * Support for FIQ on ARM architectures.
+ * Written by Philip Blundell <philb@gnu.org>, 1998
+ * Re-written by Russell King
+ */
+
+#ifndef __ASM_FIQ_H
+#define __ASM_FIQ_H
+
+#include <asm/ptrace.h>
+
+struct fiq_handler {
+	struct fiq_handler *next;
+	/* Name
+	 */
+	const char *name;
+	/* Called to ask driver to relinquish/
+	 * reacquire FIQ
+	 * return zero to accept, or -<errno>
+	 */
+	int (*fiq_op)(void *, int relinquish);
+	/* data for the relinquish/reacquire functions
+	 */
+	void *dev_id;
+};
+
+extern int claim_fiq(struct fiq_handler *f);
+extern void release_fiq(struct fiq_handler *f);
+extern void set_fiq_handler(void *start, unsigned int length);
+extern void set_fiq_regs(struct pt_regs *regs);
+extern void get_fiq_regs(struct pt_regs *regs);
+extern void enable_fiq(int fiq);
+extern void disable_fiq(int fiq);
+
+#endif
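
A sketch of the claim/release protocol above (illustration only, not part
of this patch); the handler symbols and FIQ number are hypothetical and
would normally come from a small assembly stub and the platform headers:

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/fiq.h>

static int example_fiq_op(void *dev_id, int relinquish)
{
	/* hypothetical policy: refuse to give the FIQ up while we hold it */
	return relinquish ? -EBUSY : 0;
}

static struct fiq_handler example_fh = {
	.name	= "example",
	.fiq_op	= example_fiq_op,
};

static int example_setup_fiq(int fiq)
{
	extern unsigned char example_fiq_start, example_fiq_end; /* asm stub */
	struct pt_regs regs;
	int ret;

	ret = claim_fiq(&example_fh);
	if (ret)
		return ret;

	set_fiq_handler(&example_fiq_start,
			&example_fiq_end - &example_fiq_start);
	memset(&regs, 0, sizeof(regs));
	/* ... preload the banked FIQ registers the stub expects ... */
	set_fiq_regs(&regs);
	enable_fiq(fiq);
	return 0;
}
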
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
new file mode 100644
index 0000000..1d77e51
--- /dev/null
+++ b/arch/arm/include/asm/flat.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/include/asm/flat.h -- uClinux flat-format executables
+ */
+
+#ifndef __ARM_FLAT_H__
+#define __ARM_FLAT_H__
+
+/* An odd number of words will be pushed after this alignment, so
+   deliberately misalign the value.  */
+#define	flat_stack_align(sp)	sp = (void *)(((unsigned long)(sp) - 4) | 4)
+#define	flat_argvp_envp_on_stack()		1
+#define	flat_old_ram_flag(flags)		(flags)
+#define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
+#define	flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp)
+#define	flat_put_addr_at_rp(rp, val, relval)	put_unaligned(val,rp)
+#define	flat_get_relocate_addr(rel)		(rel)
+#define	flat_set_persistent(relval, p)		0
+
+#endif /* __ARM_FLAT_H__ */
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
new file mode 100644
index 0000000..dce20c2
--- /dev/null
+++ b/arch/arm/include/asm/floppy.h
@@ -0,0 +1,148 @@
+/*
+ *  arch/arm/include/asm/floppy.h
+ *
+ *  Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Note that we don't touch FLOPPY_DMA nor FLOPPY_IRQ here
+ */
+#ifndef __ASM_ARM_FLOPPY_H
+#define __ASM_ARM_FLOPPY_H
+#if 0
+#include <asm/arch/floppy.h>
+#endif
+
+#define fd_outb(val,port)			\
+	do {					\
+		if ((port) == FD_DOR)		\
+			fd_setdor((val));	\
+		else				\
+			outb((val),(port));	\
+	} while(0)
+
+#define fd_inb(port)		inb((port))
+#define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
+					    IRQF_DISABLED,"floppy",NULL)
+#define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
+#define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
+#define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK)
+
+static inline int fd_dma_setup(void *data, unsigned int length,
+			       unsigned int mode, unsigned long addr)
+{
+	set_dma_mode(DMA_FLOPPY, mode);
+	__set_dma_addr(DMA_FLOPPY, data);
+	set_dma_count(DMA_FLOPPY, length);
+	virtual_dma_port = addr;
+	enable_dma(DMA_FLOPPY);
+	return 0;
+}
+#define fd_dma_setup		fd_dma_setup
+
+#define fd_request_dma()	request_dma(DMA_FLOPPY,"floppy")
+#define fd_free_dma()		free_dma(DMA_FLOPPY)
+#define fd_disable_dma()	disable_dma(DMA_FLOPPY)
+
+/* need to clean up dma.h */
+#define DMA_FLOPPYDISK		DMA_FLOPPY
+
+/* Floppy_selects is the list of DOR's to select drive fd
+ *
+ * On initialisation, the floppy list is scanned, and the drives allocated
+ * in the order that they are found.  This is done by seeking the drive
+ * to a non-zero track, and then restoring it to track 0.  If an error occurs,
+ * then there is no floppy drive present.       [to be put back in again]
+ */
+static unsigned char floppy_selects[2][4] =
+{
+	{ 0x10, 0x21, 0x23, 0x33 },
+	{ 0x10, 0x21, 0x23, 0x33 }
+};
+
+#define fd_setdor(dor)								\
+do {										\
+	int new_dor = (dor);							\
+	if (new_dor & 0xf0)							\
+		new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3];	\
+	else									\
+		new_dor &= 0x0c;						\
+	outb(new_dor, FD_DOR);							\
+} while (0)
+
+/*
+ * Someday, we'll automatically detect which drives are present...
+ */
+static inline void fd_scandrives (void)
+{
+#if 0
+	int floppy, drive_count;
+
+	fd_disable_irq();
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
+	raw_cmd->track = 0;
+	raw_cmd->rate = ?;
+	drive_count = 0;
+	for (floppy = 0; floppy < 4; floppy ++) {
+		current_drive = drive_count;
+		/*
+		 * Turn on floppy motor
+		 */
+		if (start_motor(redo_fd_request))
+			continue;
+		/*
+		 * Set up FDC
+		 */
+		fdc_specify();
+		/*
+		 * Tell FDC to recalibrate
+		 */
+		output_byte(FD_RECALIBRATE);
+		LAST_OUT(UNIT(floppy));
+		/* wait for command to complete */
+		if (!successful) {
+			int i;
+			for (i = drive_count; i < 3; i--)
+				floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
+			floppy_selects[fdc][3] = 0;
+			floppy -= 1;
+		} else
+			drive_count++;
+	}
+#else
+	floppy_selects[0][0] = 0x10;
+	floppy_selects[0][1] = 0x21;
+	floppy_selects[0][2] = 0x23;
+	floppy_selects[0][3] = 0x33;
+#endif
+}
+
+#define FDC1 (0x3f0)
+
+#define FLOPPY0_TYPE 4
+#define FLOPPY1_TYPE 4
+
+#define N_FDC 1
+#define N_DRIVE 4
+
+#define CROSS_64KB(a,s) (0)
+
+/*
+ * This allows people to reverse the order of
+ * fd0 and fd1, in case their hardware is
+ * strangely connected (as some RiscPCs
+ * and A5000s seem to be).
+ */
+static void driveswap(int *ints, int dummy, int dummy2)
+{
+	floppy_selects[0][0] ^= floppy_selects[0][1];
+	floppy_selects[0][1] ^= floppy_selects[0][0];
+	floppy_selects[0][0] ^= floppy_selects[0][1];
+}
+
+#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
+	
+#endif
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
new file mode 100644
index 0000000..ee5e03e
--- /dev/null
+++ b/arch/arm/include/asm/fpstate.h
@@ -0,0 +1,93 @@
+/*
+ *  arch/arm/include/asm/fpstate.h
+ *
+ *  Copyright (C) 1995 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_FPSTATE_H
+#define __ASM_ARM_FPSTATE_H
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * VFP storage area has:
+ *  - FPEXC, FPSCR, FPINST and FPINST2.
+ *  - 16 or 32 double precision data registers
+ *  - an implementation-dependent word of state for FLDMX/FSTMX (pre-ARMv6)
+ * 
+ *  FPEXC will always be non-zero once the VFP has been used in this process.
+ */
+
+struct vfp_hard_struct {
+#ifdef CONFIG_VFPv3
+	__u64 fpregs[32];
+#else
+	__u64 fpregs[16];
+#endif
+#if __LINUX_ARM_ARCH__ < 6
+	__u32 fpmx_state;
+#endif
+	__u32 fpexc;
+	__u32 fpscr;
+	/*
+	 * VFP implementation specific state
+	 */
+	__u32 fpinst;
+	__u32 fpinst2;
+
+#ifdef CONFIG_SMP
+	__u32 cpu;
+#endif
+};
+
+union vfp_state {
+	struct vfp_hard_struct	hard;
+};
+
+extern void vfp_flush_thread(union vfp_state *);
+extern void vfp_release_thread(union vfp_state *);
+
+#define FP_HARD_SIZE 35
+
+struct fp_hard_struct {
+	unsigned int save[FP_HARD_SIZE];		/* as yet undefined */
+};
+
+#define FP_SOFT_SIZE 35
+
+struct fp_soft_struct {
+	unsigned int save[FP_SOFT_SIZE];		/* undefined information */
+};
+
+#define IWMMXT_SIZE	0x98
+
+struct iwmmxt_struct {
+	unsigned int save[IWMMXT_SIZE / sizeof(unsigned int)];
+};
+
+union fp_state {
+	struct fp_hard_struct	hard;
+	struct fp_soft_struct	soft;
+#ifdef CONFIG_IWMMXT
+	struct iwmmxt_struct	iwmmxt;
+#endif
+};
+
+#define FP_SIZE (sizeof(union fp_state) / sizeof(int))
+
+struct crunch_state {
+	unsigned int	mvdx[16][2];
+	unsigned int	mvax[4][3];
+	unsigned int	dspsc[2];
+};
+
+#define CRUNCH_SIZE	sizeof(struct crunch_state)
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
new file mode 100644
index 0000000..584ef9a
--- /dev/null
+++ b/arch/arm/include/asm/ftrace.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_ARM_FTRACE
+#define _ASM_ARM_FTRACE
+
+#ifdef CONFIG_FTRACE
+#define MCOUNT_ADDR		((long)(mcount))
+#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void mcount(void);
+#endif
+
+#endif
+
+#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
new file mode 100644
index 0000000..6a332a9
--- /dev/null
+++ b/arch/arm/include/asm/futex.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <asm-generic/futex.h>
+
+#endif
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
new file mode 100644
index 0000000..a0e39d5
--- /dev/null
+++ b/arch/arm/include/asm/glue.h
@@ -0,0 +1,149 @@
+/*
+ *  arch/arm/include/asm/glue.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *  Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file provides the glue to stick the processor-specific bits
+ *  into the kernel in an efficient manner.  The idea is to use branches
+ *  when we're only targetting one class of TLB, or indirect calls
+ *  when we're targetting multiple classes of TLBs.
+ */
+#ifdef __KERNEL__
+
+
+#ifdef __STDC__
+#define ____glue(name,fn)	name##fn
+#else
+#define ____glue(name,fn)	name/**/fn
+#endif
+#define __glue(name,fn)		____glue(name,fn)
+
+
+
+/*
+ *	Data Abort Model
+ *	================
+ *
+ *	We have the following to choose from:
+ *	  arm6          - ARM6 style
+ *	  arm7		- ARM7 style
+ *	  v4_early	- ARMv4 without Thumb early abort handler
+ *	  v4t_late	- ARMv4 with Thumb late abort handler
+ *	  v4t_early	- ARMv4 with Thumb early abort handler
+ *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+ *	  xscale	- ARMv5 with Thumb and XScale extensions
+ *	  v6_early	- ARMv6 generic early abort handler
+ *	  v7_early	- ARMv7 generic early abort handler
+ */
+#undef CPU_DABORT_HANDLER
+#undef MULTI_DABORT
+
+#if defined(CONFIG_CPU_ARM610)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM710)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_LV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_late_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5TJ
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5tj_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV6
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v7_early_abort
+# endif
+#endif
+
+#ifndef CPU_DABORT_HANDLER
+#error Unknown data abort handler type
+#endif
+
+/*
+ * Prefetch abort handler.  If the CPU has an IFAR use that, otherwise
+ * use the address of the aborted instruction
+ */
+#undef CPU_PABORT_HANDLER
+#undef MULTI_PABORT
+
+#ifdef CONFIG_CPU_PABRT_IFAR
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER(reg, insn)	mrc p15, 0, reg, cr6, cr0, 2
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_NOIFAR
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER(reg, insn)	mov reg, insn
+# endif
+#endif
+
+#ifndef CPU_PABORT_HANDLER
+#error Unknown prefetch abort handler type
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
new file mode 100644
index 0000000..fff4f80
--- /dev/null
+++ b/arch/arm/include/asm/gpio.h
@@ -0,0 +1,7 @@
+#ifndef _ARCH_ARM_GPIO_H
+#define _ARCH_ARM_GPIO_H
+
+/* not all ARM platforms necessarily support this API ... */
+#include <asm/arch/gpio.h>
+
+#endif /* _ARCH_ARM_GPIO_H */
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
new file mode 100644
index 0000000..182310b
--- /dev/null
+++ b/arch/arm/include/asm/hardirq.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <asm/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int local_timer_irqs;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+#if NR_IRQS > 256
+#define HARDIRQ_BITS	9
+#else
+#define HARDIRQ_BITS	8
+#endif
+
+/*
+ * The hardirq mask has to be large enough to have space
+ * for potentially all IRQ sources in the system nesting
+ * on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware.h b/arch/arm/include/asm/hardware.h
new file mode 100644
index 0000000..eb3b3ab
--- /dev/null
+++ b/arch/arm/include/asm/hardware.h
@@ -0,0 +1,18 @@
+/*
+ *  arch/arm/include/asm/hardware.h
+ *
+ *  Copyright (C) 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Common hardware definitions
+ */
+
+#ifndef __ASM_HARDWARE_H
+#define __ASM_HARDWARE_H
+
+#include <asm/arch/hardware.h>
+
+#endif
diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h
new file mode 100644
index 0000000..04be3bd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/arm_timer.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
+#define __ASM_ARM_HARDWARE_ARM_TIMER_H
+
+#define TIMER_LOAD	0x00
+#define TIMER_VALUE	0x04
+#define TIMER_CTRL	0x08
+#define TIMER_CTRL_ONESHOT	(1 << 0)
+#define TIMER_CTRL_32BIT	(1 << 1)
+#define TIMER_CTRL_DIV1		(0 << 2)
+#define TIMER_CTRL_DIV16	(1 << 2)
+#define TIMER_CTRL_DIV256	(2 << 2)
+#define TIMER_CTRL_IE		(1 << 5)	/* Interrupt Enable (versatile only) */
+#define TIMER_CTRL_PERIODIC	(1 << 6)
+#define TIMER_CTRL_ENABLE	(1 << 7)
+
+#define TIMER_INTCLR	0x0c
+#define TIMER_RIS	0x10
+#define TIMER_MIS	0x14
+#define TIMER_BGLOAD	0x18
+
+#endif
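
For orientation (illustration only, not part of this patch), how these
offsets and control bits are typically combined to program a periodic
tick; the base address and clock rate are hypothetical:

#include <linux/io.h>
#include <asm/hardware/arm_timer.h>

/* hypothetical: start a 1kHz periodic tick on a memory-mapped SP804-style timer */
static void example_timer_start(void __iomem *base, unsigned long rate_hz)
{
	writel(0, base + TIMER_CTRL);			/* stop while reprogramming */
	writel(rate_hz / 1000, base + TIMER_LOAD);	/* reload value for 1ms */
	writel(1, base + TIMER_INTCLR);			/* clear any pending interrupt */
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC |
	       TIMER_CTRL_IE | TIMER_CTRL_ENABLE,
	       base + TIMER_CTRL);
}
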
diff --git a/arch/arm/include/asm/hardware/arm_twd.h b/arch/arm/include/asm/hardware/arm_twd.h
new file mode 100644
index 0000000..e521b70
--- /dev/null
+++ b/arch/arm/include/asm/hardware/arm_twd.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_HARDWARE_TWD_H
+#define __ASM_HARDWARE_TWD_H
+
+#define TWD_TIMER_LOAD 			0x00
+#define TWD_TIMER_COUNTER		0x04
+#define TWD_TIMER_CONTROL		0x08
+#define TWD_TIMER_INTSTAT		0x0C
+
+#define TWD_WDOG_LOAD			0x20
+#define TWD_WDOG_COUNTER		0x24
+#define TWD_WDOG_CONTROL		0x28
+#define TWD_WDOG_INTSTAT		0x2C
+#define TWD_WDOG_RESETSTAT		0x30
+#define TWD_WDOG_DISABLE		0x34
+
+#define TWD_TIMER_CONTROL_ENABLE	(1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT	(0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC	(1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE	(1 << 2)
+
+#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
new file mode 100644
index 0000000..64f2252
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -0,0 +1,56 @@
+/*
+ * arch/arm/include/asm/hardware/cache-l2x0.h
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_HARDWARE_L2X0_H
+#define __ASM_ARM_HARDWARE_L2X0_H
+
+#define L2X0_CACHE_ID			0x000
+#define L2X0_CACHE_TYPE			0x004
+#define L2X0_CTRL			0x100
+#define L2X0_AUX_CTRL			0x104
+#define L2X0_EVENT_CNT_CTRL		0x200
+#define L2X0_EVENT_CNT1_CFG		0x204
+#define L2X0_EVENT_CNT0_CFG		0x208
+#define L2X0_EVENT_CNT1_VAL		0x20C
+#define L2X0_EVENT_CNT0_VAL		0x210
+#define L2X0_INTR_MASK			0x214
+#define L2X0_MASKED_INTR_STAT		0x218
+#define L2X0_RAW_INTR_STAT		0x21C
+#define L2X0_INTR_CLEAR			0x220
+#define L2X0_CACHE_SYNC			0x730
+#define L2X0_INV_LINE_PA		0x770
+#define L2X0_INV_WAY			0x77C
+#define L2X0_CLEAN_LINE_PA		0x7B0
+#define L2X0_CLEAN_LINE_IDX		0x7B8
+#define L2X0_CLEAN_WAY			0x7BC
+#define L2X0_CLEAN_INV_LINE_PA		0x7F0
+#define L2X0_CLEAN_INV_LINE_IDX		0x7F8
+#define L2X0_CLEAN_INV_WAY		0x7FC
+#define L2X0_LOCKDOWN_WAY_D		0x900
+#define L2X0_LOCKDOWN_WAY_I		0x904
+#define L2X0_TEST_OPERATION		0xF00
+#define L2X0_LINE_DATA			0xF10
+#define L2X0_LINE_TAG			0xF30
+#define L2X0_DEBUG_CTRL			0xF40
+
+#ifndef __ASSEMBLY__
+extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+#endif
+
+#endif
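A hedged sketch of how platform code might call the initialiser declared above; the base argument is assumed to be an already-mapped L2 controller, and the value/mask pair shown is intended to leave the existing auxiliary control settings untouched.

#include <linux/init.h>
#include <asm/hardware/cache-l2x0.h>

/* Hypothetical wrapper, not part of this patch. */
static void __init example_cache_init(void __iomem *l2_base)
{
	/* aux_val = 0 with an all-ones aux_mask keeps the current AUX_CTRL settings */
	l2x0_init(l2_base, 0x00000000, 0xffffffff);
}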
diff --git a/arch/arm/include/asm/hardware/clps7111.h b/arch/arm/include/asm/hardware/clps7111.h
new file mode 100644
index 0000000..4447722
--- /dev/null
+++ b/arch/arm/include/asm/hardware/clps7111.h
@@ -0,0 +1,184 @@
+/*
+ *  arch/arm/include/asm/hardware/clps7111.h
+ *
+ *  This file contains the hardware definitions of the CLPS7111 internal
+ *  registers.
+ *
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_HARDWARE_CLPS7111_H
+#define __ASM_HARDWARE_CLPS7111_H
+
+#define CLPS7111_PHYS_BASE	(0x80000000)
+
+#ifndef __ASSEMBLY__
+#define clps_readb(off)		__raw_readb(CLPS7111_BASE + (off))
+#define clps_readw(off)		__raw_readw(CLPS7111_BASE + (off))
+#define clps_readl(off)		__raw_readl(CLPS7111_BASE + (off))
+#define clps_writeb(val,off)	__raw_writeb(val, CLPS7111_BASE + (off))
+#define clps_writew(val,off)	__raw_writew(val, CLPS7111_BASE + (off))
+#define clps_writel(val,off)	__raw_writel(val, CLPS7111_BASE + (off))
+#endif
+
+#define PADR		(0x0000)
+#define PBDR		(0x0001)
+#define PDDR		(0x0003)
+#define PADDR		(0x0040)
+#define PBDDR		(0x0041)
+#define PDDDR		(0x0043)
+#define PEDR		(0x0080)
+#define PEDDR		(0x00c0)
+#define SYSCON1		(0x0100)
+#define SYSFLG1		(0x0140)
+#define MEMCFG1		(0x0180)
+#define MEMCFG2		(0x01c0)
+#define DRFPR		(0x0200)
+#define INTSR1		(0x0240)
+#define INTMR1		(0x0280)
+#define LCDCON		(0x02c0)
+#define TC1D            (0x0300)
+#define TC2D		(0x0340)
+#define RTCDR		(0x0380)
+#define RTCMR		(0x03c0)
+#define PMPCON		(0x0400)
+#define CODR		(0x0440)
+#define UARTDR1		(0x0480)
+#define UBRLCR1		(0x04c0)
+#define SYNCIO		(0x0500)
+#define PALLSW		(0x0540)
+#define PALMSW		(0x0580)
+#define STFCLR		(0x05c0)
+#define BLEOI		(0x0600)
+#define MCEOI		(0x0640)
+#define TEOI		(0x0680)
+#define TC1EOI		(0x06c0)
+#define TC2EOI		(0x0700)
+#define RTCEOI		(0x0740)
+#define UMSEOI		(0x0780)
+#define COEOI		(0x07c0)
+#define HALT		(0x0800)
+#define STDBY		(0x0840)
+
+#define FBADDR		(0x1000)
+#define SYSCON2		(0x1100)
+#define SYSFLG2		(0x1140)
+#define INTSR2		(0x1240)
+#define INTMR2		(0x1280)
+#define UARTDR2		(0x1480)
+#define UBRLCR2		(0x14c0)
+#define SS2DR		(0x1500)
+#define SRXEOF		(0x1600)
+#define SS2POP		(0x16c0)
+#define KBDEOI		(0x1700)
+
+/* common bits: SYSCON1 / SYSCON2 */
+#define SYSCON_UARTEN		(1 << 8)
+
+#define SYSCON1_KBDSCAN(x)	((x) & 15)
+#define SYSCON1_KBDSCANMASK	(15)
+#define SYSCON1_TC1M		(1 << 4)
+#define SYSCON1_TC1S		(1 << 5)
+#define SYSCON1_TC2M		(1 << 6)
+#define SYSCON1_TC2S		(1 << 7)
+#define SYSCON1_UART1EN		SYSCON_UARTEN
+#define SYSCON1_BZTOG		(1 << 9)
+#define SYSCON1_BZMOD		(1 << 10)
+#define SYSCON1_DBGEN		(1 << 11)
+#define SYSCON1_LCDEN		(1 << 12)
+#define SYSCON1_CDENTX		(1 << 13)
+#define SYSCON1_CDENRX		(1 << 14)
+#define SYSCON1_SIREN		(1 << 15)
+#define SYSCON1_ADCKSEL(x)	(((x) & 3) << 16)
+#define SYSCON1_ADCKSEL_MASK	(3 << 16)
+#define SYSCON1_EXCKEN		(1 << 18)
+#define SYSCON1_WAKEDIS		(1 << 19)
+#define SYSCON1_IRTXM		(1 << 20)
+
+/* common bits: SYSFLG1 / SYSFLG2 */
+#define SYSFLG_UBUSY		(1 << 11)
+#define SYSFLG_URXFE		(1 << 22)
+#define SYSFLG_UTXFF		(1 << 23)
+
+#define SYSFLG1_MCDR		(1 << 0)
+#define SYSFLG1_DCDET		(1 << 1)
+#define SYSFLG1_WUDR		(1 << 2)
+#define SYSFLG1_WUON		(1 << 3)
+#define SYSFLG1_CTS		(1 << 8)
+#define SYSFLG1_DSR		(1 << 9)
+#define SYSFLG1_DCD		(1 << 10)
+#define SYSFLG1_UBUSY		SYSFLG_UBUSY
+#define SYSFLG1_NBFLG		(1 << 12)
+#define SYSFLG1_RSTFLG		(1 << 13)
+#define SYSFLG1_PFFLG		(1 << 14)
+#define SYSFLG1_CLDFLG		(1 << 15)
+#define SYSFLG1_URXFE		SYSFLG_URXFE
+#define SYSFLG1_UTXFF		SYSFLG_UTXFF
+#define SYSFLG1_CRXFE		(1 << 24)
+#define SYSFLG1_CTXFF		(1 << 25)
+#define SYSFLG1_SSIBUSY		(1 << 26)
+#define SYSFLG1_ID		(1 << 29)
+
+#define SYSFLG2_SSRXOF		(1 << 0)
+#define SYSFLG2_RESVAL		(1 << 1)
+#define SYSFLG2_RESFRM		(1 << 2)
+#define SYSFLG2_SS2RXFE		(1 << 3)
+#define SYSFLG2_SS2TXFF		(1 << 4)
+#define SYSFLG2_SS2TXUF		(1 << 5)
+#define SYSFLG2_CKMODE		(1 << 6)
+#define SYSFLG2_UBUSY		SYSFLG_UBUSY
+#define SYSFLG2_URXFE		SYSFLG_URXFE
+#define SYSFLG2_UTXFF		SYSFLG_UTXFF
+
+#define LCDCON_GSEN		(1 << 30)
+#define LCDCON_GSMD		(1 << 31)
+
+#define SYSCON2_SERSEL		(1 << 0)
+#define SYSCON2_KBD6		(1 << 1)
+#define SYSCON2_DRAMZ		(1 << 2)
+#define SYSCON2_KBWEN		(1 << 3)
+#define SYSCON2_SS2TXEN		(1 << 4)
+#define SYSCON2_PCCARD1		(1 << 5)
+#define SYSCON2_PCCARD2		(1 << 6)
+#define SYSCON2_SS2RXEN		(1 << 7)
+#define SYSCON2_UART2EN		SYSCON_UARTEN
+#define SYSCON2_SS2MAEN		(1 << 9)
+#define SYSCON2_OSTB		(1 << 12)
+#define SYSCON2_CLKENSL		(1 << 13)
+#define SYSCON2_BUZFREQ		(1 << 14)
+
+/* common bits: UARTDR1 / UARTDR2 */
+#define UARTDR_FRMERR		(1 << 8)
+#define UARTDR_PARERR		(1 << 9)
+#define UARTDR_OVERR		(1 << 10)
+
+/* common bits: UBRLCR1 / UBRLCR2 */
+#define UBRLCR_BAUD_MASK	((1 << 12) - 1)
+#define UBRLCR_BREAK		(1 << 12)
+#define UBRLCR_PRTEN		(1 << 13)
+#define UBRLCR_EVENPRT		(1 << 14)
+#define UBRLCR_XSTOP		(1 << 15)
+#define UBRLCR_FIFOEN		(1 << 16)
+#define UBRLCR_WRDLEN5		(0 << 17)
+#define UBRLCR_WRDLEN6		(1 << 17)
+#define UBRLCR_WRDLEN7		(2 << 17)
+#define UBRLCR_WRDLEN8		(3 << 17)
+#define UBRLCR_WRDLEN_MASK	(3 << 17)
+
+#define SYNCIO_SMCKEN		(1 << 13)
+#define SYNCIO_TXFRMEN		(1 << 14)
+
+#endif /* __ASM_HARDWARE_CLPS7111_H */
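To show the accessor macros and UART status bits above in context, here is a minimal polled-transmit sketch; it is hypothetical, and assumes <asm/io.h> has been included and that CLPS7111_BASE has been set up by the platform.

/* Hypothetical polled transmit on UART1, using only definitions from this header. */
static void example_uart1_putchar(char c)
{
	while (clps_readl(SYSFLG1) & SYSFLG1_UTXFF)
		;				/* spin while the transmit FIFO is full */
	clps_writel(c, UARTDR1);
}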
diff --git a/arch/arm/include/asm/hardware/cs89712.h b/arch/arm/include/asm/hardware/cs89712.h
new file mode 100644
index 0000000..f756269
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cs89712.h
@@ -0,0 +1,49 @@
+/*
+ *  arch/arm/include/asm/hardware/cs89712.h
+ *
+ *  This file contains the hardware definitions of the CS89712
+ *  additional internal registers.
+ *
+ *  Copyright (C) 2001 Thomas Gleixner autronix automation <gleixner@autronix.de>
+ *

+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_HARDWARE_CS89712_H
+#define __ASM_HARDWARE_CS89712_H
+
+/*
+*	CS89712 additional registers
+*/
+
+#define PCDR			0x0002	/* Port C Data register ---------------------------- */
+#define PCDDR			0x0042	/* Port C Data Direction register ------------------ */
+#define SDCONF			0x2300  /* SDRAM Configuration register ---------------------*/
+#define SDRFPR			0x2340  /* SDRAM Refresh period register --------------------*/
+
+#define SDCONF_ACTIVE		(1 << 10)
+#define SDCONF_CLKCTL		(1 << 9)
+#define SDCONF_WIDTH_4		(0 << 7)
+#define SDCONF_WIDTH_8		(1 << 7)
+#define SDCONF_WIDTH_16		(2 << 7)
+#define SDCONF_WIDTH_32		(3 << 7)
+#define SDCONF_SIZE_16		(0 << 5)
+#define SDCONF_SIZE_64		(1 << 5)
+#define SDCONF_SIZE_128		(2 << 5)
+#define SDCONF_SIZE_256		(3 << 5)
+#define SDCONF_CASLAT_2		(2)
+#define SDCONF_CASLAT_3		(3)
+
+#endif /* __ASM_HARDWARE_CS89712_H */
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
new file mode 100644
index 0000000..22c6892
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debug-8250.S
@@ -0,0 +1,29 @@
+/*
+ * arch/arm/include/asm/hardware/debug-8250.S
+ *
+ *  Copyright (C) 1994-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/serial_reg.h>
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UART_TX << UART_SHIFT]
+		.endm
+
+		.macro	busyuart,rd,rx
+1002:		ldrb	\rd, [\rx, #UART_LSR << UART_SHIFT]
+		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
+		bne	1002b
+		.endm
+
+		.macro	waituart,rd,rx
+#ifdef FLOW_CONTROL
+1001:		ldrb	\rd, [\rx, #UART_MSR << UART_SHIFT]
+		tst	\rd, #UART_MSR_CTS
+		beq	1001b
+#endif
+		.endm
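Note that these macros use UART_SHIFT (and, conditionally, FLOW_CONTROL) without defining them; the platform code that includes this file is expected to provide those definitions together with the UART base address it loads into \rx.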
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S
new file mode 100644
index 0000000..f9fd083
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debug-pl01x.S
@@ -0,0 +1,29 @@
+/* arch/arm/include/asm/hardware/debug-pl01x.S
+ *
+ * Debugging macro include header
+ *
+ *  Copyright (C) 1994-1999 Russell King
+ *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+#include <linux/amba/serial.h>
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UART01x_DR]
+		.endm
+
+		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UART01x_FR]
+		tst	\rd, #UART01x_FR_TXFF
+		bne	1001b
+		.endm
+
+		.macro	busyuart,rd,rx
+1001:		ldr	\rd, [\rx, #UART01x_FR]
+		tst	\rd, #UART01x_FR_BUSY
+		bne	1001b
+		.endm
diff --git a/arch/arm/include/asm/hardware/dec21285.h b/arch/arm/include/asm/hardware/dec21285.h
new file mode 100644
index 0000000..7068a1c
--- /dev/null
+++ b/arch/arm/include/asm/hardware/dec21285.h
@@ -0,0 +1,147 @@
+/*
+ *  arch/arm/include/asm/hardware/dec21285.h
+ *
+ *  Copyright (C) 1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  DC21285 registers
+ */
+#define DC21285_PCI_IACK		0x79000000
+#define DC21285_ARMCSR_BASE		0x42000000
+#define DC21285_PCI_TYPE_0_CONFIG	0x7b000000
+#define DC21285_PCI_TYPE_1_CONFIG	0x7a000000
+#define DC21285_OUTBOUND_WRITE_FLUSH	0x78000000
+#define DC21285_FLASH			0x41000000
+#define DC21285_PCI_IO			0x7c000000
+#define DC21285_PCI_MEM			0x80000000
+
+#ifndef __ASSEMBLY__
+#include <asm/hardware.h>
+#define DC21285_IO(x)		((volatile unsigned long *)(ARMCSR_BASE+(x)))
+#else
+#define DC21285_IO(x)		(x)
+#endif
+
+#define CSR_PCICMD		DC21285_IO(0x0004)
+#define CSR_CLASSREV		DC21285_IO(0x0008)
+#define CSR_PCICACHELINESIZE	DC21285_IO(0x000c)
+#define CSR_PCICSRBASE		DC21285_IO(0x0010)
+#define CSR_PCICSRIOBASE	DC21285_IO(0x0014)
+#define CSR_PCISDRAMBASE	DC21285_IO(0x0018)
+#define CSR_PCIROMBASE		DC21285_IO(0x0030)
+#define CSR_MBOX0		DC21285_IO(0x0050)
+#define CSR_MBOX1		DC21285_IO(0x0054)
+#define CSR_MBOX2		DC21285_IO(0x0058)
+#define CSR_MBOX3		DC21285_IO(0x005c)
+#define CSR_DOORBELL		DC21285_IO(0x0060)
+#define CSR_DOORBELL_SETUP	DC21285_IO(0x0064)
+#define CSR_ROMWRITEREG		DC21285_IO(0x0068)
+#define CSR_CSRBASEMASK		DC21285_IO(0x00f8)
+#define CSR_CSRBASEOFFSET	DC21285_IO(0x00fc)
+#define CSR_SDRAMBASEMASK	DC21285_IO(0x0100)
+#define CSR_SDRAMBASEOFFSET	DC21285_IO(0x0104)
+#define CSR_ROMBASEMASK		DC21285_IO(0x0108)
+#define CSR_SDRAMTIMING		DC21285_IO(0x010c)
+#define CSR_SDRAMADDRSIZE0	DC21285_IO(0x0110)
+#define CSR_SDRAMADDRSIZE1	DC21285_IO(0x0114)
+#define CSR_SDRAMADDRSIZE2	DC21285_IO(0x0118)
+#define CSR_SDRAMADDRSIZE3	DC21285_IO(0x011c)
+#define CSR_I2O_INFREEHEAD	DC21285_IO(0x0120)
+#define CSR_I2O_INPOSTTAIL	DC21285_IO(0x0124)
+#define CSR_I2O_OUTPOSTHEAD	DC21285_IO(0x0128)
+#define CSR_I2O_OUTFREETAIL	DC21285_IO(0x012c)
+#define CSR_I2O_INFREECOUNT	DC21285_IO(0x0130)
+#define CSR_I2O_OUTPOSTCOUNT	DC21285_IO(0x0134)
+#define CSR_I2O_INPOSTCOUNT	DC21285_IO(0x0138)
+#define CSR_SA110_CNTL		DC21285_IO(0x013c)
+#define SA110_CNTL_INITCMPLETE		(1 << 0)
+#define SA110_CNTL_ASSERTSERR		(1 << 1)
+#define SA110_CNTL_RXSERR		(1 << 3)
+#define SA110_CNTL_SA110DRAMPARITY	(1 << 4)
+#define SA110_CNTL_PCISDRAMPARITY	(1 << 5)
+#define SA110_CNTL_DMASDRAMPARITY	(1 << 6)
+#define SA110_CNTL_DISCARDTIMER		(1 << 8)
+#define SA110_CNTL_PCINRESET		(1 << 9)
+#define SA110_CNTL_I2O_256		(0 << 10)
+#define SA110_CNTL_I20_512		(1 << 10)
+#define SA110_CNTL_I2O_1024		(2 << 10)
+#define SA110_CNTL_I2O_2048		(3 << 10)
+#define SA110_CNTL_I2O_4096		(4 << 10)
+#define SA110_CNTL_I2O_8192		(5 << 10)
+#define SA110_CNTL_I2O_16384		(6 << 10)
+#define SA110_CNTL_I2O_32768		(7 << 10)
+#define SA110_CNTL_WATCHDOG		(1 << 13)
+#define SA110_CNTL_ROMWIDTH_UNDEF	(0 << 14)
+#define SA110_CNTL_ROMWIDTH_16		(1 << 14)
+#define SA110_CNTL_ROMWIDTH_32		(2 << 14)
+#define SA110_CNTL_ROMWIDTH_8		(3 << 14)
+#define SA110_CNTL_ROMACCESSTIME(x)	((x)<<16)
+#define SA110_CNTL_ROMBURSTTIME(x)	((x)<<20)
+#define SA110_CNTL_ROMTRISTATETIME(x)	((x)<<24)
+#define SA110_CNTL_XCSDIR(x)		((x)<<28)
+#define SA110_CNTL_PCICFN		(1 << 31)
+
+/*
+ * footbridge_cfn_mode() is used when we want
+ * to check whether we are the central function
+ */
+#define __footbridge_cfn_mode() (*CSR_SA110_CNTL & SA110_CNTL_PCICFN)
+#if defined(CONFIG_FOOTBRIDGE_HOST) && defined(CONFIG_FOOTBRIDGE_ADDIN)
+#define footbridge_cfn_mode() __footbridge_cfn_mode()
+#elif defined(CONFIG_FOOTBRIDGE_HOST)
+#define footbridge_cfn_mode() (1)
+#else
+#define footbridge_cfn_mode() (0)
+#endif
+
+#define CSR_PCIADDR_EXTN	DC21285_IO(0x0140)
+#define CSR_PREFETCHMEMRANGE	DC21285_IO(0x0144)
+#define CSR_XBUS_CYCLE		DC21285_IO(0x0148)
+#define CSR_XBUS_IOSTROBE	DC21285_IO(0x014c)
+#define CSR_DOORBELL_PCI	DC21285_IO(0x0150)
+#define CSR_DOORBELL_SA110	DC21285_IO(0x0154)
+#define CSR_UARTDR		DC21285_IO(0x0160)
+#define CSR_RXSTAT		DC21285_IO(0x0164)
+#define CSR_H_UBRLCR		DC21285_IO(0x0168)
+#define CSR_M_UBRLCR		DC21285_IO(0x016c)
+#define CSR_L_UBRLCR		DC21285_IO(0x0170)
+#define CSR_UARTCON		DC21285_IO(0x0174)
+#define CSR_UARTFLG		DC21285_IO(0x0178)
+#define CSR_IRQ_STATUS		DC21285_IO(0x0180)
+#define CSR_IRQ_RAWSTATUS	DC21285_IO(0x0184)
+#define CSR_IRQ_ENABLE		DC21285_IO(0x0188)
+#define CSR_IRQ_DISABLE		DC21285_IO(0x018c)
+#define CSR_IRQ_SOFT		DC21285_IO(0x0190)
+#define CSR_FIQ_STATUS		DC21285_IO(0x0280)
+#define CSR_FIQ_RAWSTATUS	DC21285_IO(0x0284)
+#define CSR_FIQ_ENABLE		DC21285_IO(0x0288)
+#define CSR_FIQ_DISABLE		DC21285_IO(0x028c)
+#define CSR_FIQ_SOFT		DC21285_IO(0x0290)
+#define CSR_TIMER1_LOAD		DC21285_IO(0x0300)
+#define CSR_TIMER1_VALUE	DC21285_IO(0x0304)
+#define CSR_TIMER1_CNTL		DC21285_IO(0x0308)
+#define CSR_TIMER1_CLR		DC21285_IO(0x030c)
+#define CSR_TIMER2_LOAD		DC21285_IO(0x0320)
+#define CSR_TIMER2_VALUE	DC21285_IO(0x0324)
+#define CSR_TIMER2_CNTL		DC21285_IO(0x0328)
+#define CSR_TIMER2_CLR		DC21285_IO(0x032c)
+#define CSR_TIMER3_LOAD		DC21285_IO(0x0340)
+#define CSR_TIMER3_VALUE	DC21285_IO(0x0344)
+#define CSR_TIMER3_CNTL		DC21285_IO(0x0348)
+#define CSR_TIMER3_CLR		DC21285_IO(0x034c)
+#define CSR_TIMER4_LOAD		DC21285_IO(0x0360)
+#define CSR_TIMER4_VALUE	DC21285_IO(0x0364)
+#define CSR_TIMER4_CNTL		DC21285_IO(0x0368)
+#define CSR_TIMER4_CLR		DC21285_IO(0x036c)
+
+#define TIMER_CNTL_ENABLE	(1 << 7)
+#define TIMER_CNTL_AUTORELOAD	(1 << 6)
+#define TIMER_CNTL_DIV1		(0)
+#define TIMER_CNTL_DIV16	(1 << 2)
+#define TIMER_CNTL_DIV256	(2 << 2)
+#define TIMER_CNTL_CNTEXT	(3 << 2)
+
+
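As a hypothetical sketch (not part of this patch) of how the timer registers and control bits above might be combined:

#include <asm/hardware/dec21285.h>

/* Hypothetical: start TIMER1 auto-reloading with the /16 prescaler. */
static void example_start_timer1(unsigned long interval)
{
	*CSR_TIMER1_CLR  = 0;		/* a write clears any latched timer interrupt */
	*CSR_TIMER1_LOAD = interval;
	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
			   TIMER_CNTL_DIV16;
}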
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
new file mode 100644
index 0000000..e0af498
--- /dev/null
+++ b/arch/arm/include/asm/hardware/entry-macro-iomd.S
@@ -0,0 +1,139 @@
+/*
+ * arch/arm/include/asm/hardware/entry-macro-iomd.S
+ *
+ * Low-level IRQ helper macros for IOC/IOMD based platforms
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/* IOC / IOMD based hardware */
+#include <asm/hardware/iomd.h>
+
+		.macro	disable_fiq
+		mov	r12, #ioc_base_high
+		.if	ioc_base_low
+		orr	r12, r12, #ioc_base_low
+		.endif
+		strb	r12, [r12, #0x38]	@ Disable FIQ register
+		.endm
+
+		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+		ldrb	\irqstat, [\base, #IOMD_IRQREQB]	@ get high priority first
+		ldr	\tmp, =irq_prio_h
+		teq	\irqstat, #0
+#ifdef IOMD_BASE
+		ldreqb	\irqstat, [\base, #IOMD_DMAREQ]	@ get dma
+		addeq	\tmp, \tmp, #256		@ irq_prio_h table size
+		teqeq	\irqstat, #0
+		bne	2406f
+#endif
+		ldreqb	\irqstat, [\base, #IOMD_IRQREQA]	@ get low priority
+		addeq	\tmp, \tmp, #256		@ irq_prio_d table size
+		teqeq	\irqstat, #0
+#ifdef IOMD_IRQREQC
+		ldreqb	\irqstat, [\base, #IOMD_IRQREQC]
+		addeq	\tmp, \tmp, #256		@ irq_prio_l table size
+		teqeq	\irqstat, #0
+#endif
+#ifdef IOMD_IRQREQD
+		ldreqb	\irqstat, [\base, #IOMD_IRQREQD]
+		addeq	\tmp, \tmp, #256		@ irq_prio_lc table size
+		teqeq	\irqstat, #0
+#endif
+2406:		ldrneb	\irqnr, [\tmp, \irqstat]	@ get IRQ number
+		.endm
+
+/*
+ * Interrupt table (incorporates priority).  Please note that we
+ * rely on the order of these tables (see above code).
+ */
+		.align	5
+irq_prio_h:	.byte	 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+#ifdef IOMD_BASE
+irq_prio_d:	.byte	 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+		.byte	21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+#endif
+irq_prio_l:	.byte	 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+		.byte	 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+#ifdef IOMD_IRQREQC
+irq_prio_lc:	.byte	24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
+		.byte	28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
+		.byte	29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
+		.byte	29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
+		.byte	30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
+		.byte	30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
+		.byte	29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
+		.byte	29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+		.byte	31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
+#endif
+#ifdef IOMD_IRQREQD
+irq_prio_ld:	.byte	40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
+		.byte	44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
+		.byte	45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
+		.byte	45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
+		.byte	46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
+		.byte	46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
+		.byte	45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
+		.byte	45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+		.byte	47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
+#endif
+
diff --git a/arch/arm/include/asm/hardware/ep7211.h b/arch/arm/include/asm/hardware/ep7211.h
new file mode 100644
index 0000000..654d5f6
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ep7211.h
@@ -0,0 +1,40 @@
+/*
+ *  arch/arm/include/asm/hardware/ep7211.h
+ *
+ *  This file contains the hardware definitions of the EP7211 internal
+ *  registers.
+ *
+ *  Copyright (C) 2001 Blue Mug, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_HARDWARE_EP7211_H
+#define __ASM_HARDWARE_EP7211_H
+
+#include <asm/hardware/clps7111.h>
+
+/*
+ * define EP7211_BASE to be the base address of the region
+ * you want to access.
+ */
+
+#define EP7211_PHYS_BASE	(0x80000000)
+
+/*
+ * XXX miket@bluemug.com: need to introduce EP7211 registers (those not
+ * present in 7212) here.
+ */
+
+#endif /* __ASM_HARDWARE_EP7211_H */
diff --git a/arch/arm/include/asm/hardware/ep7212.h b/arch/arm/include/asm/hardware/ep7212.h
new file mode 100644
index 0000000..3b43bbe
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ep7212.h
@@ -0,0 +1,83 @@
+/*
+ *  arch/arm/include/asm/hardware/ep7212.h
+ *
+ *  This file contains the hardware definitions of the EP7212 internal
+ *  registers.
+ *
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_HARDWARE_EP7212_H
+#define __ASM_HARDWARE_EP7212_H
+
+/*
+ * define EP7212_BASE to be the base address of the region
+ * you want to access.
+ */
+
+#define EP7212_PHYS_BASE	(0x80000000)
+
+#ifndef __ASSEMBLY__
+#define ep_readl(off)		__raw_readl(EP7212_BASE + (off))
+#define ep_writel(val,off)	__raw_writel(val, EP7212_BASE + (off))
+#endif
+
+/*
+ * These registers are specific to the EP7212 only
+ */
+#define DAIR			0x2000
+#define DAIR0			0x2040
+#define DAIDR1			0x2080
+#define DAIDR2			0x20c0
+#define DAISR			0x2100
+#define SYSCON3			0x2200
+#define INTSR3			0x2240
+#define INTMR3			0x2280
+#define LEDFLSH			0x22c0
+
+#define DAIR_DAIEN		(1 << 16)
+#define DAIR_ECS		(1 << 17)
+#define DAIR_LCTM		(1 << 19)
+#define DAIR_LCRM		(1 << 20)
+#define DAIR_RCTM		(1 << 21)
+#define DAIR_RCRM		(1 << 22)
+#define DAIR_LBM		(1 << 23)
+
+#define DAIDR2_FIFOEN		(1 << 15)
+#define DAIDR2_FIFOLEFT		(0x0d << 16)
+#define DAIDR2_FIFORIGHT	(0x11 << 16)
+
+#define DAISR_RCTS		(1 << 0)
+#define DAISR_RCRS		(1 << 1)
+#define DAISR_LCTS		(1 << 2)
+#define DAISR_LCRS		(1 << 3)
+#define DAISR_RCTU		(1 << 4)
+#define DAISR_RCRO		(1 << 5)
+#define DAISR_LCTU		(1 << 6)
+#define DAISR_LCRO		(1 << 7)
+#define DAISR_RCNF		(1 << 8)
+#define DAISR_RCNE		(1 << 9)
+#define DAISR_LCNF		(1 << 10)
+#define DAISR_LCNE		(1 << 11)
+#define DAISR_FIFO		(1 << 12)
+
+#define SYSCON3_ADCCON		(1 << 0)
+#define SYSCON3_DAISEL		(1 << 3)
+#define SYSCON3_ADCCKNSEN	(1 << 4)
+#define SYSCON3_FASTWAKE	(1 << 8)
+#define SYSCON3_DAIEN		(1 << 9)
+
+#endif /* __ASM_HARDWARE_EP7212_H */
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
new file mode 100644
index 0000000..4924914
--- /dev/null
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -0,0 +1,42 @@
+/*
+ *  arch/arm/include/asm/hardware/gic.h
+ *
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_HARDWARE_GIC_H
+#define __ASM_ARM_HARDWARE_GIC_H
+
+#include <linux/compiler.h>
+
+#define GIC_CPU_CTRL			0x00
+#define GIC_CPU_PRIMASK			0x04
+#define GIC_CPU_BINPOINT		0x08
+#define GIC_CPU_INTACK			0x0c
+#define GIC_CPU_EOI			0x10
+#define GIC_CPU_RUNNINGPRI		0x14
+#define GIC_CPU_HIGHPRI			0x18
+
+#define GIC_DIST_CTRL			0x000
+#define GIC_DIST_CTR			0x004
+#define GIC_DIST_ENABLE_SET		0x100
+#define GIC_DIST_ENABLE_CLEAR		0x180
+#define GIC_DIST_PENDING_SET		0x200
+#define GIC_DIST_PENDING_CLEAR		0x280
+#define GIC_DIST_ACTIVE_BIT		0x300
+#define GIC_DIST_PRI			0x400
+#define GIC_DIST_TARGET			0x800
+#define GIC_DIST_CONFIG			0xc00
+#define GIC_DIST_SOFTINT		0xf00
+
+#ifndef __ASSEMBLY__
+void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
+void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
+void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+#endif
+
+#endif
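A hedged sketch of the usual calling order for the functions declared above, as a platform's interrupt setup might use them; both base pointers and the starting IRQ number are placeholders chosen by the caller.

#include <linux/init.h>
#include <asm/hardware/gic.h>

/* Hypothetical IRQ init: both bases are assumed to be already-mapped GIC regions. */
static void __init example_init_irq(void __iomem *dist_base, void __iomem *cpu_base)
{
	gic_dist_init(0, dist_base, 29);	/* first GIC, IRQs numbered from 29 (arbitrary) */
	gic_cpu_init(0, cpu_base);
}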
diff --git a/arch/arm/include/asm/hardware/icst307.h b/arch/arm/include/asm/hardware/icst307.h
new file mode 100644
index 0000000..554f128
--- /dev/null
+++ b/arch/arm/include/asm/hardware/icst307.h
@@ -0,0 +1,38 @@
+/*
+ *  arch/arm/include/asm/hardware/icst307.h
+ *
+ *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Support functions for calculating clocks/divisors for the ICS307
+ *  clock generators.  See http://www.icst.com/ for more information
+ *  on these devices.
+ *
+ *  This file is similar to the icst525.h file
+ */
+#ifndef ASMARM_HARDWARE_ICST307_H
+#define ASMARM_HARDWARE_ICST307_H
+
+struct icst307_params {
+	unsigned long	ref;
+	unsigned long	vco_max;	/* inclusive */
+	unsigned short	vd_min;		/* inclusive */
+	unsigned short	vd_max;		/* inclusive */
+	unsigned char	rd_min;		/* inclusive */
+	unsigned char	rd_max;		/* inclusive */
+};
+
+struct icst307_vco {
+	unsigned short	v;
+	unsigned char	r;
+	unsigned char	s;
+};
+
+unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco);
+struct icst307_vco icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq);
+struct icst307_vco icst307_ps_to_vco(const struct icst307_params *p, unsigned long period);
+
+#endif
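A minimal sketch of the intended flow of these helpers: describe the clock generator with icst307_params, convert a requested rate into VCO settings, then read back the exact rate that can be produced. The parameter values below are placeholders, not real board data.

#include <asm/hardware/icst307.h>

/* Hypothetical example; these parameters do not describe any real hardware. */
static const struct icst307_params example_params = {
	.ref		= 24000,	/* kHz */
	.vco_max	= 280000,	/* kHz */
	.vd_min		= 4,
	.vd_max		= 511,
	.rd_min		= 1,
	.rd_max		= 127,
};

static unsigned long example_round_rate_khz(unsigned long khz)
{
	struct icst307_vco vco = icst307_khz_to_vco(&example_params, khz);

	return icst307_khz(&example_params, vco);
}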
diff --git a/arch/arm/include/asm/hardware/icst525.h b/arch/arm/include/asm/hardware/icst525.h
new file mode 100644
index 0000000..58f0dc4
--- /dev/null
+++ b/arch/arm/include/asm/hardware/icst525.h
@@ -0,0 +1,36 @@
+/*
+ *  arch/arm/include/asm/hardware/icst525.h
+ *
+ *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Support functions for calculating clocks/divisors for the ICST525
+ *  clock generators.  See http://www.icst.com/ for more information
+ *  on these devices.
+ */
+#ifndef ASMARM_HARDWARE_ICST525_H
+#define ASMARM_HARDWARE_ICST525_H
+
+struct icst525_params {
+	unsigned long	ref;
+	unsigned long	vco_max;	/* inclusive */
+	unsigned short	vd_min;		/* inclusive */
+	unsigned short	vd_max;		/* inclusive */
+	unsigned char	rd_min;		/* inclusive */
+	unsigned char	rd_max;		/* inclusive */
+};
+
+struct icst525_vco {
+	unsigned short	v;
+	unsigned char	r;
+	unsigned char	s;
+};
+
+unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco);
+struct icst525_vco icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq);
+struct icst525_vco icst525_ps_to_vco(const struct icst525_params *p, unsigned long period);
+
+#endif
diff --git a/arch/arm/include/asm/hardware/ioc.h b/arch/arm/include/asm/hardware/ioc.h
new file mode 100644
index 0000000..1f6b801
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ioc.h
@@ -0,0 +1,72 @@
+/*
+ *  arch/arm/include/asm/hardware/ioc.h
+ *
+ *  Copyright (C) Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Use these macros to read/write the IOC.  All they do is perform the actual
+ *  read/write.
+ */
+#ifndef __ASMARM_HARDWARE_IOC_H
+#define __ASMARM_HARDWARE_IOC_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We use __raw_base variants here so that we give the compiler the
+ * chance to keep IOC_BASE in a register.
+ */
+#define ioc_readb(off)		__raw_readb(IOC_BASE + (off))
+#define ioc_writeb(val,off)	__raw_writeb(val, IOC_BASE + (off))
+
+#endif
+
+#define IOC_CONTROL	(0x00)
+#define IOC_KARTTX	(0x04)
+#define IOC_KARTRX	(0x04)
+
+#define IOC_IRQSTATA	(0x10)
+#define IOC_IRQREQA	(0x14)
+#define IOC_IRQCLRA	(0x14)
+#define IOC_IRQMASKA	(0x18)
+
+#define IOC_IRQSTATB	(0x20)
+#define IOC_IRQREQB	(0x24)
+#define IOC_IRQMASKB	(0x28)
+
+#define IOC_FIQSTAT	(0x30)
+#define IOC_FIQREQ	(0x34)
+#define IOC_FIQMASK	(0x38)
+
+#define IOC_T0CNTL	(0x40)
+#define IOC_T0LTCHL	(0x40)
+#define IOC_T0CNTH	(0x44)
+#define IOC_T0LTCHH	(0x44)
+#define IOC_T0GO	(0x48)
+#define IOC_T0LATCH	(0x4c)
+
+#define IOC_T1CNTL	(0x50)
+#define IOC_T1LTCHL	(0x50)
+#define IOC_T1CNTH	(0x54)
+#define IOC_T1LTCHH	(0x54)
+#define IOC_T1GO	(0x58)
+#define IOC_T1LATCH	(0x5c)
+
+#define IOC_T2CNTL	(0x60)
+#define IOC_T2LTCHL	(0x60)
+#define IOC_T2CNTH	(0x64)
+#define IOC_T2LTCHH	(0x64)
+#define IOC_T2GO	(0x68)
+#define IOC_T2LATCH	(0x6c)
+
+#define IOC_T3CNTL	(0x70)
+#define IOC_T3LTCHL	(0x70)
+#define IOC_T3CNTH	(0x74)
+#define IOC_T3LTCHH	(0x74)
+#define IOC_T3GO	(0x78)
+#define IOC_T3LATCH	(0x7c)
+
+#endif
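For instance, a hypothetical fragment (assuming IOC_BASE and the __raw_* accessors are available) that masks one interrupt source in the A bank:

/* Hypothetical: clear bit 3 in the bank-A interrupt mask register. */
static void example_mask_irqa_bit3(void)
{
	ioc_writeb(ioc_readb(IOC_IRQMASKA) & ~(1 << 3), IOC_IRQMASKA);
}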
diff --git a/arch/arm/include/asm/hardware/iomd.h b/arch/arm/include/asm/hardware/iomd.h
new file mode 100644
index 0000000..9c5afbd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iomd.h
@@ -0,0 +1,226 @@
+/*
+ *  arch/arm/include/asm/hardware/iomd.h
+ *
+ *  Copyright (C) 1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file contains information about the IOMD ASIC used in the
+ *  Acorn RiscPC and subsequently integrated into the CLPS7500 chips.
+ */
+#ifndef __ASMARM_HARDWARE_IOMD_H
+#define __ASMARM_HARDWARE_IOMD_H
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We use __raw_base variants here so that we give the compiler the
+ * chance to keep IOMD_BASE in a register.
+ */
+#define iomd_readb(off)		__raw_readb(IOMD_BASE + (off))
+#define iomd_readl(off)		__raw_readl(IOMD_BASE + (off))
+#define iomd_writeb(val,off)	__raw_writeb(val, IOMD_BASE + (off))
+#define iomd_writel(val,off)	__raw_writel(val, IOMD_BASE + (off))
+
+#endif
+
+#define IOMD_CONTROL	(0x000)
+#define IOMD_KARTTX	(0x004)
+#define IOMD_KARTRX	(0x004)
+#define IOMD_KCTRL	(0x008)
+
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_IOLINES	(0x00C)
+#endif
+
+#define IOMD_IRQSTATA	(0x010)
+#define IOMD_IRQREQA	(0x014)
+#define IOMD_IRQCLRA	(0x014)
+#define IOMD_IRQMASKA	(0x018)
+
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_SUSMODE	(0x01C)
+#endif
+
+#define IOMD_IRQSTATB	(0x020)
+#define IOMD_IRQREQB	(0x024)
+#define IOMD_IRQMASKB	(0x028)
+
+#define IOMD_FIQSTAT	(0x030)
+#define IOMD_FIQREQ	(0x034)
+#define IOMD_FIQMASK	(0x038)
+
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_CLKCTL	(0x03C)
+#endif
+
+#define IOMD_T0CNTL	(0x040)
+#define IOMD_T0LTCHL	(0x040)
+#define IOMD_T0CNTH	(0x044)
+#define IOMD_T0LTCHH	(0x044)
+#define IOMD_T0GO	(0x048)
+#define IOMD_T0LATCH	(0x04c)
+
+#define IOMD_T1CNTL	(0x050)
+#define IOMD_T1LTCHL	(0x050)
+#define IOMD_T1CNTH	(0x054)
+#define IOMD_T1LTCHH	(0x054)
+#define IOMD_T1GO	(0x058)
+#define IOMD_T1LATCH	(0x05c)
+
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_IRQSTATC	(0x060)
+#define IOMD_IRQREQC	(0x064)
+#define IOMD_IRQMASKC	(0x068)
+
+#define IOMD_VIDMUX	(0x06c)
+
+#define IOMD_IRQSTATD	(0x070)
+#define IOMD_IRQREQD	(0x074)
+#define IOMD_IRQMASKD	(0x078)
+#endif
+
+#define IOMD_ROMCR0	(0x080)
+#define IOMD_ROMCR1	(0x084)
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DRAMCR	(0x088)
+#endif
+#define IOMD_REFCR	(0x08C)
+
+#define IOMD_FSIZE	(0x090)
+#define IOMD_ID0	(0x094)
+#define IOMD_ID1	(0x098)
+#define IOMD_VERSION	(0x09C)
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_MOUSEX	(0x0A0)
+#define IOMD_MOUSEY	(0x0A4)
+#endif
+
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_MSEDAT	(0x0A8)
+#define IOMD_MSECTL	(0x0Ac)
+#endif
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DMATCR	(0x0C0)
+#endif
+#define IOMD_IOTCR	(0x0C4)
+#define IOMD_ECTCR	(0x0C8)
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_DMAEXT	(0x0CC)
+#endif
+#ifdef CONFIG_ARCH_CLPS7500
+#define IOMD_ASTCR	(0x0CC)
+#define IOMD_DRAMCR	(0x0D0)
+#define IOMD_SELFREF	(0x0D4)
+#define IOMD_ATODICR	(0x0E0)
+#define IOMD_ATODSR	(0x0E4)
+#define IOMD_ATODCC	(0x0E8)
+#define IOMD_ATODCNT1	(0x0EC)
+#define IOMD_ATODCNT2	(0x0F0)
+#define IOMD_ATODCNT3	(0x0F4)
+#define IOMD_ATODCNT4	(0x0F8)
+#endif
+
+#ifdef CONFIG_ARCH_RPC
+#define DMA_EXT_IO0	1
+#define DMA_EXT_IO1	2
+#define DMA_EXT_IO2	4
+#define DMA_EXT_IO3	8
+
+#define IOMD_IO0CURA	(0x100)
+#define IOMD_IO0ENDA	(0x104)
+#define IOMD_IO0CURB	(0x108)
+#define IOMD_IO0ENDB	(0x10C)
+#define IOMD_IO0CR	(0x110)
+#define IOMD_IO0ST	(0x114)
+
+#define IOMD_IO1CURA	(0x120)
+#define IOMD_IO1ENDA	(0x124)
+#define IOMD_IO1CURB	(0x128)
+#define IOMD_IO1ENDB	(0x12C)
+#define IOMD_IO1CR	(0x130)
+#define IOMD_IO1ST	(0x134)
+
+#define IOMD_IO2CURA	(0x140)
+#define IOMD_IO2ENDA	(0x144)
+#define IOMD_IO2CURB	(0x148)
+#define IOMD_IO2ENDB	(0x14C)
+#define IOMD_IO2CR	(0x150)
+#define IOMD_IO2ST	(0x154)
+
+#define IOMD_IO3CURA	(0x160)
+#define IOMD_IO3ENDA	(0x164)
+#define IOMD_IO3CURB	(0x168)
+#define IOMD_IO3ENDB	(0x16C)
+#define IOMD_IO3CR	(0x170)
+#define IOMD_IO3ST	(0x174)
+#endif
+
+#define IOMD_SD0CURA	(0x180)
+#define IOMD_SD0ENDA	(0x184)
+#define IOMD_SD0CURB	(0x188)
+#define IOMD_SD0ENDB	(0x18C)
+#define IOMD_SD0CR	(0x190)
+#define IOMD_SD0ST	(0x194)
+
+#ifdef CONFIG_ARCH_RPC
+#define IOMD_SD1CURA	(0x1A0)
+#define IOMD_SD1ENDA	(0x1A4)
+#define IOMD_SD1CURB	(0x1A8)
+#define IOMD_SD1ENDB	(0x1AC)
+#define IOMD_SD1CR	(0x1B0)
+#define IOMD_SD1ST	(0x1B4)
+#endif
+
+#define IOMD_CURSCUR	(0x1C0)
+#define IOMD_CURSINIT	(0x1C4)
+
+#define IOMD_VIDCUR	(0x1D0)
+#define IOMD_VIDEND	(0x1D4)
+#define IOMD_VIDSTART	(0x1D8)
+#define IOMD_VIDINIT	(0x1DC)
+#define IOMD_VIDCR	(0x1E0)
+
+#define IOMD_DMASTAT	(0x1F0)
+#define IOMD_DMAREQ	(0x1F4)
+#define IOMD_DMAMASK	(0x1F8)
+
+#define DMA_END_S	(1 << 31)
+#define DMA_END_L	(1 << 30)
+
+#define DMA_CR_C	0x80
+#define DMA_CR_D	0x40
+#define DMA_CR_E	0x20
+
+#define DMA_ST_OFL	4
+#define DMA_ST_INT	2
+#define DMA_ST_AB	1
+
+/*
+ * DMA (MEMC) compatibility
+ */
+#define HALF_SAM	vram_half_sam
+#define VDMA_ALIGNMENT	(HALF_SAM * 2)
+#define VDMA_XFERSIZE	(HALF_SAM)
+#define VDMA_INIT	IOMD_VIDINIT
+#define VDMA_START	IOMD_VIDSTART
+#define VDMA_END	IOMD_VIDEND
+
+#ifndef __ASSEMBLY__
+extern unsigned int vram_half_sam;
+#define video_set_dma(start,end,offset)				\
+do {								\
+	outl (SCREEN_START + start, VDMA_START);		\
+	outl (SCREEN_START + end - VDMA_XFERSIZE, VDMA_END);	\
+	if (offset >= end - VDMA_XFERSIZE)			\
+		offset |= 0x40000000;				\
+	outl (SCREEN_START + offset, VDMA_INIT);		\
+} while (0)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
new file mode 100644
index 0000000..af64676
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -0,0 +1,888 @@
+/*
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ADMA_H
+#define _ADMA_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <asm/hardware.h>
+#include <asm/hardware/iop_adma.h>
+
+/* Memory copy units */
+#define DMA_CCR(chan)		(chan->mmr_base + 0x0)
+#define DMA_CSR(chan)		(chan->mmr_base + 0x4)
+#define DMA_DAR(chan)		(chan->mmr_base + 0xc)
+#define DMA_NDAR(chan)		(chan->mmr_base + 0x10)
+#define DMA_PADR(chan)		(chan->mmr_base + 0x14)
+#define DMA_PUADR(chan)	(chan->mmr_base + 0x18)
+#define DMA_LADR(chan)		(chan->mmr_base + 0x1c)
+#define DMA_BCR(chan)		(chan->mmr_base + 0x20)
+#define DMA_DCR(chan)		(chan->mmr_base + 0x24)
+
+/* Application accelerator unit  */
+#define AAU_ACR(chan)		(chan->mmr_base + 0x0)
+#define AAU_ASR(chan)		(chan->mmr_base + 0x4)
+#define AAU_ADAR(chan)		(chan->mmr_base + 0x8)
+#define AAU_ANDAR(chan)	(chan->mmr_base + 0xc)
+#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
+#define AAU_DAR(chan)		(chan->mmr_base + 0x20)
+#define AAU_ABCR(chan)		(chan->mmr_base + 0x24)
+#define AAU_ADCR(chan)		(chan->mmr_base + 0x28)
+#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
+#define AAU_EDCR0_IDX	8
+#define AAU_EDCR1_IDX	17
+#define AAU_EDCR2_IDX	26
+
+#define DMA0_ID 0
+#define DMA1_ID 1
+#define AAU_ID 2
+
+struct iop3xx_aau_desc_ctrl {
+	unsigned int int_en:1;
+	unsigned int blk1_cmd_ctrl:3;
+	unsigned int blk2_cmd_ctrl:3;
+	unsigned int blk3_cmd_ctrl:3;
+	unsigned int blk4_cmd_ctrl:3;
+	unsigned int blk5_cmd_ctrl:3;
+	unsigned int blk6_cmd_ctrl:3;
+	unsigned int blk7_cmd_ctrl:3;
+	unsigned int blk8_cmd_ctrl:3;
+	unsigned int blk_ctrl:2;
+	unsigned int dual_xor_en:1;
+	unsigned int tx_complete:1;
+	unsigned int zero_result_err:1;
+	unsigned int zero_result_en:1;
+	unsigned int dest_write_en:1;
+};
+
+struct iop3xx_aau_e_desc_ctrl {
+	unsigned int reserved:1;
+	unsigned int blk1_cmd_ctrl:3;
+	unsigned int blk2_cmd_ctrl:3;
+	unsigned int blk3_cmd_ctrl:3;
+	unsigned int blk4_cmd_ctrl:3;
+	unsigned int blk5_cmd_ctrl:3;
+	unsigned int blk6_cmd_ctrl:3;
+	unsigned int blk7_cmd_ctrl:3;
+	unsigned int blk8_cmd_ctrl:3;
+	unsigned int reserved2:7;
+};
+
+struct iop3xx_dma_desc_ctrl {
+	unsigned int pci_transaction:4;
+	unsigned int int_en:1;
+	unsigned int dac_cycle_en:1;
+	unsigned int mem_to_mem_en:1;
+	unsigned int crc_data_tx_en:1;
+	unsigned int crc_gen_en:1;
+	unsigned int crc_seed_dis:1;
+	unsigned int reserved:21;
+	unsigned int crc_tx_complete:1;
+};
+
+struct iop3xx_desc_dma {
+	u32 next_desc;
+	union {
+		u32 pci_src_addr;
+		u32 pci_dest_addr;
+		u32 src_addr;
+	};
+	union {
+		u32 upper_pci_src_addr;
+		u32 upper_pci_dest_addr;
+	};
+	union {
+		u32 local_pci_src_addr;
+		u32 local_pci_dest_addr;
+		u32 dest_addr;
+	};
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
+	};
+	u32 crc_addr;
+};
+
+struct iop3xx_desc_aau {
+	u32 next_desc;
+	u32 src[4];
+	u32 dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	union {
+		u32 src_addr;
+		u32 e_desc_ctrl;
+		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+	} src_edc[31];
+};
+
+struct iop3xx_aau_gfmr {
+	unsigned int gfmr1:8;
+	unsigned int gfmr2:8;
+	unsigned int gfmr3:8;
+	unsigned int gfmr4:8;
+};
+
+struct iop3xx_desc_pq_xor {
+	u32 next_desc;
+	u32 src[3];
+	union {
+		u32 data_mult1;
+		struct iop3xx_aau_gfmr data_mult1_field;
+	};
+	u32 dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	union {
+		u32 src_addr;
+		u32 e_desc_ctrl;
+		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+		u32 data_multiplier;
+		struct iop3xx_aau_gfmr data_mult_field;
+		u32 reserved;
+	} src_edc_gfmr[19];
+};
+
+struct iop3xx_desc_dual_xor {
+	u32 next_desc;
+	u32 src0_addr;
+	u32 src1_addr;
+	u32 h_src_addr;
+	u32 d_src_addr;
+	u32 h_dest_addr;
+	u32 byte_count;
+	union {
+		u32 desc_ctrl;
+		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+	};
+	u32 d_dest_addr;
+};
+
+union iop3xx_desc {
+	struct iop3xx_desc_aau *aau;
+	struct iop3xx_desc_dma *dma;
+	struct iop3xx_desc_pq_xor *pq_xor;
+	struct iop3xx_desc_dual_xor *dual_xor;
+	void *ptr;
+};
+
+static inline int iop_adma_get_max_xor(void)
+{
+	return 32;
+}
+
+static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
+{
+	int id = chan->device->id;
+
+	switch (id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return __raw_readl(DMA_DAR(chan));
+	case AAU_ID:
+		return __raw_readl(AAU_ADAR(chan));
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+						u32 next_desc_addr)
+{
+	int id = chan->device->id;
+
+	switch (id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		__raw_writel(next_desc_addr, DMA_NDAR(chan));
+		break;
+	case AAU_ID:
+		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
+		break;
+	}
+
+}
+
+#define IOP_ADMA_STATUS_BUSY (1 << 10)
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
+
+static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
+}
+
+static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
+					int num_slots)
+{
+	/* num_slots will only ever be 1, 2, 4, or 8 */
+	return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
+					int *slots_per_op)
+{
+	static const char slot_count_table[] = {
+						1, 1, 1, 1, /* 01 - 04 */
+						2, 2, 2, 2, /* 05 - 08 */
+						4, 4, 4, 4, /* 09 - 12 */
+						4, 4, 4, 4, /* 13 - 16 */
+						8, 8, 8, 8, /* 17 - 20 */
+						8, 8, 8, 8, /* 21 - 24 */
+						8, 8, 8, 8, /* 25 - 28 */
+						8, 8, 8, 8, /* 29 - 32 */
+					      };
+	*slots_per_op = slot_count_table[src_cnt - 1];
+	return *slots_per_op;
+}
+
+static inline int
+iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return iop_chan_memcpy_slot_count(0, slots_per_op);
+	case AAU_ID:
+		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
+						int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	if (len)
+		slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
+
+/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
+ * descriptors
+ */
+static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
+						int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	if (len)
+		slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
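As a worked example of the splitting above (arbitrary numbers): a 3000-byte zero-sum across five sources gets slots_per_op = 2 from the table in iop3xx_aau_xor_slot_count, and the length is covered by three descriptors of 1024, 1024 and 952 bytes, so the function returns 3 * 2 = 6 slots.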
+
+static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->dest_addr;
+	case AAU_ID:
+		return hw_desc.aau->dest_addr;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->byte_count;
+	case AAU_ID:
+		return hw_desc.aau->byte_count;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+/* translate the src_idx to a descriptor word index */
+static inline int __desc_idx(int src_idx)
+{
+	static const int desc_idx_table[] = { 0, 0, 0, 0,
+					      0, 1, 2, 3,
+					      5, 6, 7, 8,
+					      9, 10, 11, 12,
+					      14, 15, 16, 17,
+					      18, 19, 20, 21,
+					      23, 24, 25, 26,
+					      27, 28, 29, 30,
+					    };
+
+	return desc_idx_table[src_idx];
+}
+
+static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan,
+					int src_idx)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->src_addr;
+	case AAU_ID:
+		break;
+	default:
+		BUG();
+	}
+
+	if (src_idx < 4)
+		return hw_desc.aau->src[src_idx];
+	else
+		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
+}
+
+static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
+					int src_idx, dma_addr_t addr)
+{
+	if (src_idx < 4)
+		hw_desc->src[src_idx] = addr;
+	else
+		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
+}
+
+static inline void
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_dma_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.mem_to_mem_en = 1;
+	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+	hw_desc->upper_pci_src_addr = 0;
+	hw_desc->crc_addr = 0;
+}
+
+static inline void
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
+	u_desc_ctrl.field.dest_write_en = 1;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline u32
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+		     unsigned long flags)
+{
+	int i, shift;
+	u32 edcr;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	switch (src_cnt) {
+	case 25 ... 32:
+		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		edcr = 0;
+		shift = 1;
+		for (i = 24; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
+		src_cnt = 24;
+		/* fall through */
+	case 17 ... 24:
+		if (!u_desc_ctrl.field.blk_ctrl) {
+			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		}
+		edcr = 0;
+		shift = 1;
+		for (i = 16; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
+		src_cnt = 16;
+		/* fall through */
+	case 9 ... 16:
+		if (!u_desc_ctrl.field.blk_ctrl)
+			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+		edcr = 0;
+		shift = 1;
+		for (i = 8; i < src_cnt; i++) {
+			edcr |= (1 << shift);
+			shift += 3;
+		}
+		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
+		src_cnt = 8;
+		/* fall through */
+	case 2 ... 8:
+		shift = 1;
+		for (i = 0; i < src_cnt; i++) {
+			u_desc_ctrl.value |= (1 << shift);
+			shift += 3;
+		}
+
+		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+	}
+
+	u_desc_ctrl.field.dest_write_en = 1;
+	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+
+	return u_desc_ctrl.value;
+}
+
+static inline void
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
+{
+	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
+}
+
+/* return the number of operations */
+static inline int
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
+{
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+	int i, j;
+
+	hw_desc = desc->hw_desc;
+
+	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
+		i += slots_per_op, j++) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
+		u_desc_ctrl.field.dest_write_en = 0;
+		u_desc_ctrl.field.zero_result_en = 1;
+		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+		iter->desc_ctrl = u_desc_ctrl.value;
+
+		/* for the subsequent descriptors preserve the store queue
+		 * and chain them together
+		 */
+		if (i) {
+			prev_hw_desc =
+				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
+			prev_hw_desc->next_desc =
+				(u32) (desc->async_tx.phys + (i << 5));
+		}
+	}
+
+	return j;
+}
+
+static inline void
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	union {
+		u32 value;
+		struct iop3xx_aau_desc_ctrl field;
+	} u_desc_ctrl;
+
+	u_desc_ctrl.value = 0;
+	switch (src_cnt) {
+	case 25 ... 32:
+		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 17 ... 24:
+		if (!u_desc_ctrl.field.blk_ctrl) {
+			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+		}
+		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 9 ... 16:
+		if (!u_desc_ctrl.field.blk_ctrl)
+			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
+		/* fall through */
+	case 1 ... 8:
+		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+	}
+
+	u_desc_ctrl.field.dest_write_en = 0;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+	hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan,
+					u32 byte_count)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		hw_desc.dma->byte_count = byte_count;
+		break;
+	case AAU_ID:
+		hw_desc.aau->byte_count = byte_count;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void
+iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
+			struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		iop_desc_init_memcpy(desc, 1);
+		hw_desc.dma->byte_count = 0;
+		hw_desc.dma->dest_addr = 0;
+		hw_desc.dma->src_addr = 0;
+		break;
+	case AAU_ID:
+		iop_desc_init_null_xor(desc, 2, 1);
+		hw_desc.aau->byte_count = 0;
+		hw_desc.aau->dest_addr = 0;
+		hw_desc.aau->src[0] = 0;
+		hw_desc.aau->src[1] = 0;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void
+iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+	int slots_per_op = desc->slots_per_op;
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int i = 0;
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		hw_desc->byte_count = len;
+	} else {
+		do {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+			i += slots_per_op;
+		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
+
+		if (len) {
+			iter = iop_hw_desc_slot_idx(hw_desc, i);
+			iter->byte_count = len;
+		}
+	}
+}
+
+static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan,
+					dma_addr_t addr)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		hw_desc.dma->dest_addr = addr;
+		break;
+	case AAU_ID:
+		hw_desc.aau->dest_addr = addr;
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
+					dma_addr_t addr)
+{
+	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+	hw_desc->src_addr = addr;
+}
+
+static inline void
+iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+				dma_addr_t addr)
+{
+
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	int i;
+
+	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+	}
+}
+
+static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
+					int src_idx, dma_addr_t addr)
+{
+
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+	int i;
+
+	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+	}
+}
+
+static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
+					u32 next_desc_addr)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	BUG_ON(hw_desc.dma->next_desc);
+	hw_desc.dma->next_desc = next_desc_addr;
+}
+
+static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	return hw_desc.dma->next_desc;
+}
+
+static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
+{
+	/* hw_desc->next_desc is the same location for all channels */
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+	hw_desc.dma->next_desc = 0;
+}
+
+static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
+						u32 val)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	hw_desc->src[0] = val;
+}
+
+static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+{
+	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
+
+	BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
+	return desc_ctrl.zero_result_err;
+}
+
+static inline void iop_chan_append(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl;
+
+	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+	dma_chan_ctrl |= 0x2;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
+{
+	return __raw_readl(DMA_CSR(chan));
+}
+
+static inline void iop_chan_disable(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+	dma_chan_ctrl &= ~1;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_chan_enable(struct iop_adma_chan *chan)
+{
+	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+
+	dma_chan_ctrl |= 1;
+	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	status &= (1 << 9);
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	status &= (1 << 8);
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
+		break;
+	case AAU_ID:
+		status &= (1 << 5);
+		break;
+	default:
+		BUG();
+	}
+
+	__raw_writel(status, DMA_CSR(chan));
+}
+
+static inline int
+iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return 0;
+}
+
+static inline int
+iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	return test_bit(5, &status);
+}
+
+static inline int
+iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(2, &status);
+	default:
+		return 0;
+	}
+}
+
+static inline int
+iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(3, &status);
+	default:
+		return 0;
+	}
+}
+
+static inline int
+iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return test_bit(1, &status);
+	default:
+		return 0;
+	}
+}
+#endif /* _ADMA_H */
diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h
new file mode 100644
index 0000000..222e74b
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iop3xx-gpio.h
@@ -0,0 +1,73 @@
+/*
+ * arch/arm/include/asm/hardware/iop3xx-gpio.h
+ *
+ * IOP3xx GPIO wrappers
+ *
+ * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Based on IXP4XX gpio.h file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
+#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H
+
+#include <asm/hardware.h>
+#include <asm-generic/gpio.h>
+
+#define IOP3XX_N_GPIOS	8
+
+static inline int gpio_get_value(unsigned gpio)
+{
+	if (gpio >= IOP3XX_N_GPIOS)
+		return __gpio_get_value(gpio);
+
+	return gpio_line_get(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+	if (gpio >= IOP3XX_N_GPIOS) {
+		__gpio_set_value(gpio, value);
+		return;
+	}
+	gpio_line_set(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	if (gpio < IOP3XX_N_GPIOS)
+		return 0;
+	else
+		return __gpio_cansleep(gpio);
+}
+
+/*
+ * The GPIOs do not generate any interrupts.
+ * Note: the manuals are not clear about this.
+ */
+static inline int gpio_to_irq(int gpio)
+{
+	return -EINVAL;
+}
+
+static inline int irq_to_gpio(int gpio)
+{
+	return -EINVAL;
+}
+
+#endif
+
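A hedged usage sketch (editorial, not part of the patch): with the wrappers above, GPIO numbers below IOP3XX_N_GPIOS are routed to the on-chip gpio_line_* helpers and higher numbers fall back to gpiolib. Board code could therefore drive an on-chip line roughly as follows; line 0 and the output direction are assumptions for illustration only.

/* Editorial sketch: configure on-chip line 0 as an output and drive it high. */
static void example_iop3xx_gpio_high(void)
{
	gpio_line_config(IOP3XX_GPIO_LINE(0), GPIO_OUT);	/* from iop3xx.h */
	gpio_set_value(0, 1);	/* 0 < IOP3XX_N_GPIOS, so gpio_line_set() is used */
}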
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
new file mode 100644
index 0000000..4b8e7f5
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -0,0 +1,312 @@
+/*
+ * arch/arm/include/asm/hardware/iop3xx.h
+ *
+ * Intel IOP32X and IOP33X register definitions
+ *
+ * Author: Rory Bolt <rorybolt@pacbell.net>
+ * Copyright (C) 2002 Rory Bolt
+ * Copyright (C) 2004 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __IOP3XX_H
+#define __IOP3XX_H
+
+/*
+ * IOP3XX GPIO handling
+ */
+#define GPIO_IN			0
+#define GPIO_OUT		1
+#define GPIO_LOW		0
+#define GPIO_HIGH		1
+#define IOP3XX_GPIO_LINE(x)	(x)
+
+#ifndef __ASSEMBLY__
+extern void gpio_line_config(int line, int direction);
+extern int  gpio_line_get(int line);
+extern void gpio_line_set(int line, int value);
+extern int init_atu;
+extern int iop3xx_get_init_atu(void);
+#endif
+
+
+/*
+ * IOP3XX processor registers
+ */
+#define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_SIZE		0x00002000
+#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
+					IOP3XX_PERIPHERAL_SIZE - 1)
+#define IOP3XX_PERIPHERAL_UPPER_VA (IOP3XX_PERIPHERAL_VIRT_BASE +\
+					IOP3XX_PERIPHERAL_SIZE - 1)
+#define IOP3XX_PMMR_PHYS_TO_VIRT(addr) (u32) ((u32) (addr) -\
+					(IOP3XX_PERIPHERAL_PHYS_BASE\
+					- IOP3XX_PERIPHERAL_VIRT_BASE))
+#define IOP3XX_REG_ADDR(reg)		(IOP3XX_PERIPHERAL_VIRT_BASE + (reg))
+
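Editorial worked example (not part of the patch): IOP3XX_PMMR_PHYS_TO_VIRT() rebases a peripheral address by the fixed distance between the physical and virtual windows, 0xffffe000 - 0xfeffe000 = 0x01000000, so for instance:

/* Editorial sketch: physical 0xffffe180 maps to virtual 0xfeffe180. */
static inline u32 example_pmmr_virt(void)
{
	return IOP3XX_PMMR_PHYS_TO_VIRT(0xffffe180);	/* == 0xfeffe180 */
}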
+/* Address Translation Unit  */
+#define IOP3XX_ATUVID		(volatile u16 *)IOP3XX_REG_ADDR(0x0100)
+#define IOP3XX_ATUDID		(volatile u16 *)IOP3XX_REG_ADDR(0x0102)
+#define IOP3XX_ATUCMD		(volatile u16 *)IOP3XX_REG_ADDR(0x0104)
+#define IOP3XX_ATUSR		(volatile u16 *)IOP3XX_REG_ADDR(0x0106)
+#define IOP3XX_ATURID		(volatile u8  *)IOP3XX_REG_ADDR(0x0108)
+#define IOP3XX_ATUCCR		(volatile u32 *)IOP3XX_REG_ADDR(0x0109)
+#define IOP3XX_ATUCLSR		(volatile u8  *)IOP3XX_REG_ADDR(0x010c)
+#define IOP3XX_ATULT		(volatile u8  *)IOP3XX_REG_ADDR(0x010d)
+#define IOP3XX_ATUHTR		(volatile u8  *)IOP3XX_REG_ADDR(0x010e)
+#define IOP3XX_ATUBIST		(volatile u8  *)IOP3XX_REG_ADDR(0x010f)
+#define IOP3XX_IABAR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0110)
+#define IOP3XX_IAUBAR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0114)
+#define IOP3XX_IABAR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0118)
+#define IOP3XX_IAUBAR1		(volatile u32 *)IOP3XX_REG_ADDR(0x011c)
+#define IOP3XX_IABAR2		(volatile u32 *)IOP3XX_REG_ADDR(0x0120)
+#define IOP3XX_IAUBAR2		(volatile u32 *)IOP3XX_REG_ADDR(0x0124)
+#define IOP3XX_ASVIR		(volatile u16 *)IOP3XX_REG_ADDR(0x012c)
+#define IOP3XX_ASIR		(volatile u16 *)IOP3XX_REG_ADDR(0x012e)
+#define IOP3XX_ERBAR		(volatile u32 *)IOP3XX_REG_ADDR(0x0130)
+#define IOP3XX_ATUILR		(volatile u8  *)IOP3XX_REG_ADDR(0x013c)
+#define IOP3XX_ATUIPR		(volatile u8  *)IOP3XX_REG_ADDR(0x013d)
+#define IOP3XX_ATUMGNT		(volatile u8  *)IOP3XX_REG_ADDR(0x013e)
+#define IOP3XX_ATUMLAT		(volatile u8  *)IOP3XX_REG_ADDR(0x013f)
+#define IOP3XX_IALR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0140)
+#define IOP3XX_IATVR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0144)
+#define IOP3XX_ERLR		(volatile u32 *)IOP3XX_REG_ADDR(0x0148)
+#define IOP3XX_ERTVR		(volatile u32 *)IOP3XX_REG_ADDR(0x014c)
+#define IOP3XX_IALR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0150)
+#define IOP3XX_IALR2		(volatile u32 *)IOP3XX_REG_ADDR(0x0154)
+#define IOP3XX_IATVR2		(volatile u32 *)IOP3XX_REG_ADDR(0x0158)
+#define IOP3XX_OIOWTVR		(volatile u32 *)IOP3XX_REG_ADDR(0x015c)
+#define IOP3XX_OMWTVR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0160)
+#define IOP3XX_OUMWTVR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0164)
+#define IOP3XX_OMWTVR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0168)
+#define IOP3XX_OUMWTVR1		(volatile u32 *)IOP3XX_REG_ADDR(0x016c)
+#define IOP3XX_OUDWTVR		(volatile u32 *)IOP3XX_REG_ADDR(0x0178)
+#define IOP3XX_ATUCR		(volatile u32 *)IOP3XX_REG_ADDR(0x0180)
+#define IOP3XX_PCSR		(volatile u32 *)IOP3XX_REG_ADDR(0x0184)
+#define IOP3XX_ATUISR		(volatile u32 *)IOP3XX_REG_ADDR(0x0188)
+#define IOP3XX_ATUIMR		(volatile u32 *)IOP3XX_REG_ADDR(0x018c)
+#define IOP3XX_IABAR3		(volatile u32 *)IOP3XX_REG_ADDR(0x0190)
+#define IOP3XX_IAUBAR3		(volatile u32 *)IOP3XX_REG_ADDR(0x0194)
+#define IOP3XX_IALR3		(volatile u32 *)IOP3XX_REG_ADDR(0x0198)
+#define IOP3XX_IATVR3		(volatile u32 *)IOP3XX_REG_ADDR(0x019c)
+#define IOP3XX_OCCAR		(volatile u32 *)IOP3XX_REG_ADDR(0x01a4)
+#define IOP3XX_OCCDR		(volatile u32 *)IOP3XX_REG_ADDR(0x01ac)
+#define IOP3XX_PDSCR		(volatile u32 *)IOP3XX_REG_ADDR(0x01bc)
+#define IOP3XX_PMCAPID		(volatile u8  *)IOP3XX_REG_ADDR(0x01c0)
+#define IOP3XX_PMNEXT		(volatile u8  *)IOP3XX_REG_ADDR(0x01c1)
+#define IOP3XX_APMCR		(volatile u16 *)IOP3XX_REG_ADDR(0x01c2)
+#define IOP3XX_APMCSR		(volatile u16 *)IOP3XX_REG_ADDR(0x01c4)
+#define IOP3XX_PCIXCAPID	(volatile u8  *)IOP3XX_REG_ADDR(0x01e0)
+#define IOP3XX_PCIXNEXT		(volatile u8  *)IOP3XX_REG_ADDR(0x01e1)
+#define IOP3XX_PCIXCMD		(volatile u16 *)IOP3XX_REG_ADDR(0x01e2)
+#define IOP3XX_PCIXSR		(volatile u32 *)IOP3XX_REG_ADDR(0x01e4)
+#define IOP3XX_PCIIRSR		(volatile u32 *)IOP3XX_REG_ADDR(0x01ec)
+#define IOP3XX_PCSR_OUT_Q_BUSY (1 << 15)
+#define IOP3XX_PCSR_IN_Q_BUSY	(1 << 14)
+#define IOP3XX_ATUCR_OUT_EN	(1 << 1)
+
+#define IOP3XX_INIT_ATU_DEFAULT 0
+#define IOP3XX_INIT_ATU_DISABLE -1
+#define IOP3XX_INIT_ATU_ENABLE	 1
+
+/* Messaging Unit  */
+#define IOP3XX_IMR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0310)
+#define IOP3XX_IMR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0314)
+#define IOP3XX_OMR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0318)
+#define IOP3XX_OMR1		(volatile u32 *)IOP3XX_REG_ADDR(0x031c)
+#define IOP3XX_IDR		(volatile u32 *)IOP3XX_REG_ADDR(0x0320)
+#define IOP3XX_IISR		(volatile u32 *)IOP3XX_REG_ADDR(0x0324)
+#define IOP3XX_IIMR		(volatile u32 *)IOP3XX_REG_ADDR(0x0328)
+#define IOP3XX_ODR		(volatile u32 *)IOP3XX_REG_ADDR(0x032c)
+#define IOP3XX_OISR		(volatile u32 *)IOP3XX_REG_ADDR(0x0330)
+#define IOP3XX_OIMR		(volatile u32 *)IOP3XX_REG_ADDR(0x0334)
+#define IOP3XX_MUCR		(volatile u32 *)IOP3XX_REG_ADDR(0x0350)
+#define IOP3XX_QBAR		(volatile u32 *)IOP3XX_REG_ADDR(0x0354)
+#define IOP3XX_IFHPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0360)
+#define IOP3XX_IFTPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0364)
+#define IOP3XX_IPHPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0368)
+#define IOP3XX_IPTPR		(volatile u32 *)IOP3XX_REG_ADDR(0x036c)
+#define IOP3XX_OFHPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0370)
+#define IOP3XX_OFTPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0374)
+#define IOP3XX_OPHPR		(volatile u32 *)IOP3XX_REG_ADDR(0x0378)
+#define IOP3XX_OPTPR		(volatile u32 *)IOP3XX_REG_ADDR(0x037c)
+#define IOP3XX_IAR		(volatile u32 *)IOP3XX_REG_ADDR(0x0380)
+
+/* DMA Controller  */
+#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \
+					(0x400 + (chan << 6)))
+#define IOP3XX_DMA_UPPER_PA(chan)  (IOP3XX_DMA_PHYS_BASE(chan) + 0x27)
+
+/* Peripheral bus interface  */
+#define IOP3XX_PBCR		(volatile u32 *)IOP3XX_REG_ADDR(0x0680)
+#define IOP3XX_PBISR		(volatile u32 *)IOP3XX_REG_ADDR(0x0684)
+#define IOP3XX_PBBAR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0688)
+#define IOP3XX_PBLR0		(volatile u32 *)IOP3XX_REG_ADDR(0x068c)
+#define IOP3XX_PBBAR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0690)
+#define IOP3XX_PBLR1		(volatile u32 *)IOP3XX_REG_ADDR(0x0694)
+#define IOP3XX_PBBAR2		(volatile u32 *)IOP3XX_REG_ADDR(0x0698)
+#define IOP3XX_PBLR2		(volatile u32 *)IOP3XX_REG_ADDR(0x069c)
+#define IOP3XX_PBBAR3		(volatile u32 *)IOP3XX_REG_ADDR(0x06a0)
+#define IOP3XX_PBLR3		(volatile u32 *)IOP3XX_REG_ADDR(0x06a4)
+#define IOP3XX_PBBAR4		(volatile u32 *)IOP3XX_REG_ADDR(0x06a8)
+#define IOP3XX_PBLR4		(volatile u32 *)IOP3XX_REG_ADDR(0x06ac)
+#define IOP3XX_PBBAR5		(volatile u32 *)IOP3XX_REG_ADDR(0x06b0)
+#define IOP3XX_PBLR5		(volatile u32 *)IOP3XX_REG_ADDR(0x06b4)
+#define IOP3XX_PMBR0		(volatile u32 *)IOP3XX_REG_ADDR(0x06c0)
+#define IOP3XX_PMBR1		(volatile u32 *)IOP3XX_REG_ADDR(0x06e0)
+#define IOP3XX_PMBR2		(volatile u32 *)IOP3XX_REG_ADDR(0x06e4)
+
+/* Peripheral performance monitoring unit  */
+#define IOP3XX_GTMR		(volatile u32 *)IOP3XX_REG_ADDR(0x0700)
+#define IOP3XX_ESR		(volatile u32 *)IOP3XX_REG_ADDR(0x0704)
+#define IOP3XX_EMISR		(volatile u32 *)IOP3XX_REG_ADDR(0x0708)
+#define IOP3XX_GTSR		(volatile u32 *)IOP3XX_REG_ADDR(0x0710)
+/* PERCR0 DOESN'T EXIST - index from 1! */
+#define IOP3XX_PERCR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0710)
+
+/* General Purpose I/O  */
+#define IOP3XX_GPOE		(volatile u32 *)IOP3XX_GPIO_REG(0x0000)
+#define IOP3XX_GPID		(volatile u32 *)IOP3XX_GPIO_REG(0x0004)
+#define IOP3XX_GPOD		(volatile u32 *)IOP3XX_GPIO_REG(0x0008)
+
+/* Timers  */
+#define IOP3XX_TU_TMR0		(volatile u32 *)IOP3XX_TIMER_REG(0x0000)
+#define IOP3XX_TU_TMR1		(volatile u32 *)IOP3XX_TIMER_REG(0x0004)
+#define IOP3XX_TU_TCR0		(volatile u32 *)IOP3XX_TIMER_REG(0x0008)
+#define IOP3XX_TU_TCR1		(volatile u32 *)IOP3XX_TIMER_REG(0x000c)
+#define IOP3XX_TU_TRR0		(volatile u32 *)IOP3XX_TIMER_REG(0x0010)
+#define IOP3XX_TU_TRR1		(volatile u32 *)IOP3XX_TIMER_REG(0x0014)
+#define IOP3XX_TU_TISR		(volatile u32 *)IOP3XX_TIMER_REG(0x0018)
+#define IOP3XX_TU_WDTCR		(volatile u32 *)IOP3XX_TIMER_REG(0x001c)
+#define IOP_TMR_EN	    0x02
+#define IOP_TMR_RELOAD	    0x04
+#define IOP_TMR_PRIVILEGED 0x08
+#define IOP_TMR_RATIO_1_1  0x00
+
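Editorial sketch (assumed usage, not from the patch): a timer mode value is normally composed from the IOP_TMR_* bits above and written through the write_tmr0()/write_tmr1() accessors defined further down in this header, along these lines:

/* Editorial sketch: enable timer 0 with auto-reload at a 1:1 clock ratio. */
static inline void example_start_tmr0(void)
{
	write_tmr0(IOP_TMR_EN | IOP_TMR_RELOAD |
		   IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1);
}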
+/* Watchdog timer definitions */
+#define IOP_WDTCR_EN_ARM        0x1e1e1e1e
+#define IOP_WDTCR_EN            0xe1e1e1e1
+/* iop3xx does not support stopping the watchdog, so we just re-arm */
+#define IOP_WDTCR_DIS_ARM	(IOP_WDTCR_EN_ARM)
+#define IOP_WDTCR_DIS		(IOP_WDTCR_EN)
+
+/* Application accelerator unit  */
+#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800)
+#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7)
+
+/* I2C bus interface unit  */
+#define IOP3XX_ICR0		(volatile u32 *)IOP3XX_REG_ADDR(0x1680)
+#define IOP3XX_ISR0		(volatile u32 *)IOP3XX_REG_ADDR(0x1684)
+#define IOP3XX_ISAR0		(volatile u32 *)IOP3XX_REG_ADDR(0x1688)
+#define IOP3XX_IDBR0		(volatile u32 *)IOP3XX_REG_ADDR(0x168c)
+#define IOP3XX_IBMR0		(volatile u32 *)IOP3XX_REG_ADDR(0x1694)
+#define IOP3XX_ICR1		(volatile u32 *)IOP3XX_REG_ADDR(0x16a0)
+#define IOP3XX_ISR1		(volatile u32 *)IOP3XX_REG_ADDR(0x16a4)
+#define IOP3XX_ISAR1		(volatile u32 *)IOP3XX_REG_ADDR(0x16a8)
+#define IOP3XX_IDBR1		(volatile u32 *)IOP3XX_REG_ADDR(0x16ac)
+#define IOP3XX_IBMR1		(volatile u32 *)IOP3XX_REG_ADDR(0x16b4)
+
+
+/*
+ * IOP3XX I/O and Mem space regions for PCI autoconfiguration
+ */
+#define IOP3XX_PCI_LOWER_MEM_PA	0x80000000
+
+#define IOP3XX_PCI_IO_WINDOW_SIZE	0x00010000
+#define IOP3XX_PCI_LOWER_IO_PA		0x90000000
+#define IOP3XX_PCI_LOWER_IO_VA		0xfe000000
+#define IOP3XX_PCI_LOWER_IO_BA		0x90000000
+#define IOP3XX_PCI_UPPER_IO_PA		(IOP3XX_PCI_LOWER_IO_PA +\
+					IOP3XX_PCI_IO_WINDOW_SIZE - 1)
+#define IOP3XX_PCI_UPPER_IO_VA		(IOP3XX_PCI_LOWER_IO_VA +\
+					IOP3XX_PCI_IO_WINDOW_SIZE - 1)
+#define IOP3XX_PCI_IO_PHYS_TO_VIRT(addr) (((u32) (addr) -\
+					IOP3XX_PCI_LOWER_IO_PA) +\
+					IOP3XX_PCI_LOWER_IO_VA)
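Editorial worked example (not part of the patch): IOP3XX_PCI_IO_PHYS_TO_VIRT() rebases an address from the 0x90000000 physical I/O window onto the 0xfe000000 virtual window, for example:

/* Editorial sketch: physical 0x90000100 maps to virtual 0xfe000100. */
static inline u32 example_pci_io_virt(void)
{
	return IOP3XX_PCI_IO_PHYS_TO_VIRT(0x90000100);	/* == 0xfe000100 */
}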
+
+
+#ifndef __ASSEMBLY__
+void iop3xx_map_io(void);
+void iop_init_cp6_handler(void);
+void iop_init_time(unsigned long tickrate);
+unsigned long iop_gettimeoffset(void);
+
+static inline void write_tmr0(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline void write_tmr1(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c1, c1, 0" : : "r" (val));
+}
+
+static inline u32 read_tcr0(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c2, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline u32 read_tcr1(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c3, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline void write_trr0(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline void write_trr1(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline void write_tisr(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val));
+}
+
+static inline u32 read_wdtcr(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c7, c1, 0":"=r" (val));
+	return val;
+}
+static inline void write_wdtcr(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c7, c1, 0"::"r" (val));
+}
+
+extern unsigned long get_iop_tick_rate(void);
+
+/* only iop13xx has these registers; we define these stubs to present a
+ * common register interface for the iop_wdt driver.
+ */
+#define IOP_RCSR_WDT	(0)
+static inline u32 read_rcsr(void)
+{
+	return 0;
+}
+static inline void write_wdtsr(u32 val)
+{
+	do { } while (0);
+}
+
+extern struct platform_device iop3xx_dma_0_channel;
+extern struct platform_device iop3xx_dma_1_channel;
+extern struct platform_device iop3xx_aau_channel;
+extern struct platform_device iop3xx_i2c0_device;
+extern struct platform_device iop3xx_i2c1_device;
+
+#endif
+
+
+#endif
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
new file mode 100644
index 0000000..cb7e361
--- /dev/null
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef IOP_ADMA_H
+#define IOP_ADMA_H
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#define IOP_ADMA_SLOT_SIZE 32
+#define IOP_ADMA_THRESHOLD 4
+
+/**
+ * struct iop_adma_device - internal representation of an ADMA device
+ * @pdev: Platform device
+ * @id: HW ADMA Device selector
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @common: embedded struct dma_device
+ */
+struct iop_adma_device {
+	struct platform_device *pdev;
+	int id;
+	dma_addr_t dma_desc_pool;
+	void *dma_desc_pool_virt;
+	struct dma_device common;
+};
+
+/**
+ * struct iop_adma_chan - internal representation of an ADMA channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @mmr_base: memory mapped register base
+ * @chain: device chain view of the descriptors
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @last_used: placeholder for allocation to continue from where it left off
+ * @all_slots: complete domain of slots usable by the channel
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
+ */
+struct iop_adma_chan {
+	int pending;
+	dma_cookie_t completed_cookie;
+	spinlock_t lock; /* protects the descriptor slot pool */
+	void __iomem *mmr_base;
+	struct list_head chain;
+	struct iop_adma_device *device;
+	struct dma_chan common;
+	struct iop_adma_desc_slot *last_used;
+	struct list_head all_slots;
+	int slots_allocated;
+	struct tasklet_struct irq_tasklet;
+};
+
+/**
+ * struct iop_adma_desc_slot - IOP-ADMA software descriptor
+ * @slot_node: node on the iop_adma_chan.all_slots list
+ * @chain_node: node on the iop_adma_chan.chain list
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @slots_per_op: number of slots per operation
+ * @idx: pool index
+ * @unmap_src_cnt: number of xor sources
+ * @unmap_len: transaction bytecount
+ * @async_tx: support for the async_tx api
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ *	for example transfer lengths larger than the supported hw max
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result of the CRC32 calculation
+ */
+struct iop_adma_desc_slot {
+	struct list_head slot_node;
+	struct list_head chain_node;
+	void *hw_desc;
+	struct iop_adma_desc_slot *group_head;
+	u16 slot_cnt;
+	u16 slots_per_op;
+	u16 idx;
+	u16 unmap_src_cnt;
+	size_t unmap_len;
+	struct dma_async_tx_descriptor async_tx;
+	union {
+		u32 *xor_check_result;
+		u32 *crc32_result;
+	};
+};
+
+struct iop_adma_platform_data {
+	int hw_id;
+	dma_cap_mask_t cap_mask;
+	size_t pool_size;
+};
+
+#define to_iop_sw_desc(addr_hw_desc) \
+	container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
+#define iop_hw_desc_slot_idx(hw_desc, idx) \
+	((void *) (((unsigned long) hw_desc) + ((idx) << 5)))
+#endif
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
new file mode 100644
index 0000000..74b5fff
--- /dev/null
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -0,0 +1,99 @@
+/*
+ * arch/arm/include/asm/hardware/it8152.h
+ *
+ * Copyright Compulab Ltd., 2006,2007
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * ITE 8152 companion chip register definitions
+ */
+
+#ifndef __ASM_HARDWARE_IT8152_H
+#define __ASM_HARDWARE_IT8152_H
+extern unsigned long it8152_base_address;
+
+#define IT8152_IO_BASE			(it8152_base_address + 0x03e00000)
+#define IT8152_CFGREG_BASE		(it8152_base_address + 0x03f00000)
+
+#define __REG_IT8152(x)			(it8152_base_address + (x))
+
+#define IT8152_PCI_CFG_ADDR		__REG_IT8152(0x3f00800)
+#define IT8152_PCI_CFG_DATA		__REG_IT8152(0x3f00804)
+
+#define IT8152_INTC_LDCNIRR		__REG_IT8152(0x3f00300)
+#define IT8152_INTC_LDPNIRR		__REG_IT8152(0x3f00304)
+#define IT8152_INTC_LDCNIMR		__REG_IT8152(0x3f00308)
+#define IT8152_INTC_LDPNIMR		__REG_IT8152(0x3f0030C)
+#define IT8152_INTC_LDNITR		__REG_IT8152(0x3f00310)
+#define IT8152_INTC_LDNIAR		__REG_IT8152(0x3f00314)
+#define IT8152_INTC_LPCNIRR		__REG_IT8152(0x3f00320)
+#define IT8152_INTC_LPPNIRR		__REG_IT8152(0x3f00324)
+#define IT8152_INTC_LPCNIMR		__REG_IT8152(0x3f00328)
+#define IT8152_INTC_LPPNIMR		__REG_IT8152(0x3f0032C)
+#define IT8152_INTC_LPNITR		__REG_IT8152(0x3f00330)
+#define IT8152_INTC_LPNIAR		__REG_IT8152(0x3f00334)
+#define IT8152_INTC_PDCNIRR		__REG_IT8152(0x3f00340)
+#define IT8152_INTC_PDPNIRR		__REG_IT8152(0x3f00344)
+#define IT8152_INTC_PDCNIMR		__REG_IT8152(0x3f00348)
+#define IT8152_INTC_PDPNIMR		__REG_IT8152(0x3f0034C)
+#define IT8152_INTC_PDNITR		__REG_IT8152(0x3f00350)
+#define IT8152_INTC_PDNIAR		__REG_IT8152(0x3f00354)
+#define IT8152_INTC_INTC_TYPER		__REG_IT8152(0x3f003FC)
+
+#define IT8152_GPIO_GPDR		__REG_IT8152(0x3f00500)
+
+/*
+  Interrupt controller per register summary:
+  ---------------------------------------
+  LDCNIRR:
+  IT8152_LD_IRQ(8) PCICLK stop
+  IT8152_LD_IRQ(7) MCLK ready
+  IT8152_LD_IRQ(6) s/w
+  IT8152_LD_IRQ(5) UART
+  IT8152_LD_IRQ(4) GPIO
+  IT8152_LD_IRQ(3) TIMER 4
+  IT8152_LD_IRQ(2) TIMER 3
+  IT8152_LD_IRQ(1) TIMER 2
+  IT8152_LD_IRQ(0) TIMER 1
+
+  LPCNIRR:
+  IT8152_LP_IRQ(x) serial IRQ x
+
+  PDCNIRR:
+  IT8152_PD_IRQ(14) PCISERR
+  IT8152_PD_IRQ(13) CPU/PCI bridge target abort (h2pTADR)
+  IT8152_PD_IRQ(12) CPU/PCI bridge master abort (h2pMADR)
+  IT8152_PD_IRQ(11) PCI INTD
+  IT8152_PD_IRQ(10) PCI INTC
+  IT8152_PD_IRQ(9)  PCI INTB
+  IT8152_PD_IRQ(8)  PCI INTA
+  IT8152_PD_IRQ(7)  serial INTD
+  IT8152_PD_IRQ(6)  serial INTC
+  IT8152_PD_IRQ(5)  serial INTB
+  IT8152_PD_IRQ(4)  serial INTA
+  IT8152_PD_IRQ(3)  serial IRQ IOCHK (IOCHKR)
+  IT8152_PD_IRQ(2)  chaining DMA (CDMAR)
+  IT8152_PD_IRQ(1)  USB (USBR)
+  IT8152_PD_IRQ(0)  Audio controller (ACR)
+ */
+/* frequently used interrupts */
+#define IT8152_PCISERR		IT8152_PD_IRQ(14)
+#define IT8152_H2PTADR		IT8152_PD_IRQ(13)
+#define IT8152_H2PMAR		IT8152_PD_IRQ(12)
+#define IT8152_PCI_INTD		IT8152_PD_IRQ(11)
+#define IT8152_PCI_INTC		IT8152_PD_IRQ(10)
+#define IT8152_PCI_INTB		IT8152_PD_IRQ(9)
+#define IT8152_PCI_INTA		IT8152_PD_IRQ(8)
+#define IT8152_CDMA_INT		IT8152_PD_IRQ(2)
+#define IT8152_USB_INT		IT8152_PD_IRQ(1)
+#define IT8152_AUDIO_INT	IT8152_PD_IRQ(0)
+
+struct pci_dev;
+struct pci_sys_data;
+
+extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
+extern void it8152_init_irq(void);
+extern int it8152_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin);
+extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
+extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
+
+#endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/hardware/linkup-l1110.h b/arch/arm/include/asm/hardware/linkup-l1110.h
new file mode 100644
index 0000000..7ec9116
--- /dev/null
+++ b/arch/arm/include/asm/hardware/linkup-l1110.h
@@ -0,0 +1,48 @@
+/*
+*
+* Definitions for H3600 Handheld Computer
+*
+* Copyright 2001 Compaq Computer Corporation.
+*
+* Use consistent with the GNU GPL is permitted,
+* provided that this copyright notice is
+* preserved in its entirety in all copies and derived works.
+*
+* COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+* FITNESS FOR ANY PARTICULAR PURPOSE.
+*
+* Author: Jamey Hicks.
+*
+*/
+
+/* LinkUp Systems PCCard/CompactFlash Interface for SA-1100 */
+
+/* PC Card Status Register */
+#define LINKUP_PRS_S1	(1 << 0) /* voltage control bits S1-S4 */
+#define LINKUP_PRS_S2	(1 << 1)
+#define LINKUP_PRS_S3	(1 << 2)
+#define LINKUP_PRS_S4	(1 << 3)
+#define LINKUP_PRS_BVD1	(1 << 4)
+#define LINKUP_PRS_BVD2	(1 << 5)
+#define LINKUP_PRS_VS1	(1 << 6)
+#define LINKUP_PRS_VS2	(1 << 7)
+#define LINKUP_PRS_RDY	(1 << 8)
+#define LINKUP_PRS_CD1	(1 << 9)
+#define LINKUP_PRS_CD2	(1 << 10)
+
+/* PC Card Command Register */
+#define LINKUP_PRC_S1	(1 << 0)
+#define LINKUP_PRC_S2	(1 << 1)
+#define LINKUP_PRC_S3	(1 << 2)
+#define LINKUP_PRC_S4	(1 << 3)
+#define LINKUP_PRC_RESET (1 << 4)
+#define LINKUP_PRC_APOE	(1 << 5) /* Auto Power Off Enable: clears S1-S4 when either nCD goes high */
+#define LINKUP_PRC_CFE	(1 << 6) /* CompactFlash mode Enable: addresses A[10:0] only, A[25:11] high */
+#define LINKUP_PRC_SOE	(1 << 7) /* signal output driver enable */
+#define LINKUP_PRC_SSP	(1 << 8) /* socket select polarity: 0 for socket 0, 1 for socket 1 */
+#define LINKUP_PRC_MBZ	(1 << 15) /* must be zero */
+
+struct linkup_l1110 {
+	volatile short prc;
+};
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
new file mode 100644
index 0000000..954b1be
--- /dev/null
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -0,0 +1,217 @@
+/*
+ * arch/arm/include/asm/hardware/locomo.h
+ *
+ * This file contains the definitions for the LoCoMo G/A Chip
+ *
+ * (C) Copyright 2004 John Lenz
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Based on sa1111.h
+ */
+#ifndef _ASM_ARCH_LOCOMO
+#define _ASM_ARCH_LOCOMO
+
+#define locomo_writel(val,addr)	({ *(volatile u16 *)(addr) = (val); })
+#define locomo_readl(addr)	(*(volatile u16 *)(addr))
+
+/* LOCOMO version */
+#define LOCOMO_VER	0x00
+
+/* Pin status */
+#define LOCOMO_ST	0x04
+
+/* Pin status */
+#define LOCOMO_C32K	0x08
+
+/* Interrupt controller */
+#define LOCOMO_ICR	0x0C
+
+/* MCS decoder for boot selecting */
+#define LOCOMO_MCSX0	0x10
+#define LOCOMO_MCSX1	0x14
+#define LOCOMO_MCSX2	0x18
+#define LOCOMO_MCSX3	0x1c
+
+/* Touch panel controller */
+#define LOCOMO_ASD	0x20		/* AD start delay */
+#define LOCOMO_HSD	0x28		/* HSYS delay */
+#define LOCOMO_HSC	0x2c		/* HSYS period */
+#define LOCOMO_TADC	0x30		/* tablet ADC clock */
+
+
+/* Long time timer */
+#define LOCOMO_LTC	0xd8		/* LTC interrupt setting */
+#define LOCOMO_LTINT	0xdc		/* LTC interrupt */
+
+/* DAC control signal for LCD (COMADJ ) */
+#define LOCOMO_DAC		0xe0
+/* DAC control */
+#define	LOCOMO_DAC_SCLOEB	0x08	/* SCL pin output data       */
+#define	LOCOMO_DAC_TEST		0x04	/* Test bit                  */
+#define	LOCOMO_DAC_SDA		0x02	/* SDA pin level (read-only) */
+#define	LOCOMO_DAC_SDAOEB	0x01	/* SDA pin output data       */
+
+/* SPI interface */
+#define LOCOMO_SPI	0x60
+#define LOCOMO_SPIMD	0x00		/* SPI mode setting */
+#define LOCOMO_SPICT	0x04		/* SPI mode control */
+#define LOCOMO_SPIST	0x08		/* SPI status */
+#define	LOCOMO_SPI_TEND	(1 << 3)	/* Transfer end bit */
+#define	LOCOMO_SPI_REND	(1 << 2)	/* Receive end bit */
+#define	LOCOMO_SPI_RFW	(1 << 1)	/* write buffer bit */
+#define	LOCOMO_SPI_RFR	(1)		/* read buffer bit */
+
+#define LOCOMO_SPIIS	0x10		/* SPI interrupt status */
+#define LOCOMO_SPIWE	0x14		/* SPI interrupt status write enable */
+#define LOCOMO_SPIIE	0x18		/* SPI interrupt enable */
+#define LOCOMO_SPIIR	0x1c		/* SPI interrupt request */
+#define LOCOMO_SPITD	0x20		/* SPI transfer data write */
+#define LOCOMO_SPIRD	0x24		/* SPI receive data read */
+#define LOCOMO_SPITS	0x28		/* SPI transfer data shift */
+#define LOCOMO_SPIRS	0x2C		/* SPI receive data shift */
+
+/* GPIO */
+#define LOCOMO_GPD		0x90	/* GPIO direction */
+#define LOCOMO_GPE		0x94	/* GPIO input enable */
+#define LOCOMO_GPL		0x98	/* GPIO level */
+#define LOCOMO_GPO		0x9c	/* GPIO out data setting */
+#define LOCOMO_GRIE		0xa0	/* GPIO rise detection */
+#define LOCOMO_GFIE		0xa4	/* GPIO fall detection */
+#define LOCOMO_GIS		0xa8	/* GPIO edge detection status */
+#define LOCOMO_GWE		0xac	/* GPIO status write enable */
+#define LOCOMO_GIE		0xb0	/* GPIO interrupt enable */
+#define LOCOMO_GIR		0xb4	/* GPIO interrupt request */
+#define	LOCOMO_GPIO(Nb)		(0x01 << (Nb))
+#define LOCOMO_GPIO_RTS		LOCOMO_GPIO(0)
+#define LOCOMO_GPIO_CTS		LOCOMO_GPIO(1)
+#define LOCOMO_GPIO_DSR		LOCOMO_GPIO(2)
+#define LOCOMO_GPIO_DTR		LOCOMO_GPIO(3)
+#define LOCOMO_GPIO_LCD_VSHA_ON	LOCOMO_GPIO(4)
+#define LOCOMO_GPIO_LCD_VSHD_ON	LOCOMO_GPIO(5)
+#define LOCOMO_GPIO_LCD_VEE_ON	LOCOMO_GPIO(6)
+#define LOCOMO_GPIO_LCD_MOD	LOCOMO_GPIO(7)
+#define LOCOMO_GPIO_DAC_ON	LOCOMO_GPIO(8)
+#define LOCOMO_GPIO_FL_VR	LOCOMO_GPIO(9)
+#define LOCOMO_GPIO_DAC_SDATA	LOCOMO_GPIO(10)
+#define LOCOMO_GPIO_DAC_SCK	LOCOMO_GPIO(11)
+#define LOCOMO_GPIO_DAC_SLOAD	LOCOMO_GPIO(12)
+#define LOCOMO_GPIO_CARD_DETECT LOCOMO_GPIO(13)
+#define LOCOMO_GPIO_WRITE_PROT  LOCOMO_GPIO(14)
+#define LOCOMO_GPIO_CARD_POWER  LOCOMO_GPIO(15)
+
+/* Start the definitions of the devices.  Each device has an initial
+ * base address and a series of offsets from that base address. */
+
+/* Keyboard controller */
+#define LOCOMO_KEYBOARD		0x40
+#define LOCOMO_KIB		0x00	/* KIB level */
+#define LOCOMO_KSC		0x04	/* KSTRB control */
+#define LOCOMO_KCMD		0x08	/* KSTRB command */
+#define LOCOMO_KIC		0x0c	/* Key interrupt */
+
+/* Front light adjustment controller */
+#define LOCOMO_FRONTLIGHT	0xc8
+#define LOCOMO_ALS		0x00	/* Adjust light cycle */
+#define LOCOMO_ALD		0x04	/* Adjust light duty */
+
+#define LOCOMO_ALC_EN		0x8000
+
+/* Backlight controller: TFT signal */
+#define LOCOMO_BACKLIGHT	0x38
+#define LOCOMO_TC		0x00		/* TFT control signal */
+#define LOCOMO_CPSD		0x04		/* CPS delay */
+
+/* Audio controller */
+#define LOCOMO_AUDIO		0x54
+#define LOCOMO_ACC		0x00	/* Audio clock */
+#define LOCOMO_PAIF		0xD0	/* PCM audio interface */
+/* Audio clock */
+#define	LOCOMO_ACC_XON		0x80
+#define	LOCOMO_ACC_XEN		0x40
+#define	LOCOMO_ACC_XSEL0	0x00
+#define	LOCOMO_ACC_XSEL1	0x20
+#define	LOCOMO_ACC_MCLKEN	0x10
+#define	LOCOMO_ACC_64FSEN	0x08
+#define	LOCOMO_ACC_CLKSEL000	0x00	/* mclk  2 */
+#define	LOCOMO_ACC_CLKSEL001	0x01	/* mclk  3 */
+#define	LOCOMO_ACC_CLKSEL010	0x02	/* mclk  4 */
+#define	LOCOMO_ACC_CLKSEL011	0x03	/* mclk  6 */
+#define	LOCOMO_ACC_CLKSEL100	0x04	/* mclk  8 */
+#define	LOCOMO_ACC_CLKSEL101	0x05	/* mclk 12 */
+/* PCM audio interface */
+#define	LOCOMO_PAIF_SCINV	0x20
+#define	LOCOMO_PAIF_SCEN	0x10
+#define	LOCOMO_PAIF_LRCRST	0x08
+#define	LOCOMO_PAIF_LRCEVE	0x04
+#define	LOCOMO_PAIF_LRCINV	0x02
+#define	LOCOMO_PAIF_LRCEN	0x01
+
+/* LED controller */
+#define LOCOMO_LED		0xe8
+#define LOCOMO_LPT0		0x00
+#define LOCOMO_LPT1		0x04
+/* LED control */
+#define LOCOMO_LPT_TOFH		0x80
+#define LOCOMO_LPT_TOFL		0x08
+#define LOCOMO_LPT_TOH(TOH)	((TOH & 0x7) << 4)
+#define LOCOMO_LPT_TOL(TOL)	((TOL & 0x7))
+
+extern struct bus_type locomo_bus_type;
+
+#define LOCOMO_DEVID_KEYBOARD	0
+#define LOCOMO_DEVID_FRONTLIGHT	1
+#define LOCOMO_DEVID_BACKLIGHT	2
+#define LOCOMO_DEVID_AUDIO	3
+#define LOCOMO_DEVID_LED	4
+#define LOCOMO_DEVID_UART	5
+#define LOCOMO_DEVID_SPI	6
+
+struct locomo_dev {
+	struct device	dev;
+	unsigned int	devid;
+	unsigned int	irq[1];
+
+	void		*mapbase;
+	unsigned long	length;
+
+	u64		dma_mask;
+};
+
+#define LOCOMO_DEV(_d)	container_of((_d), struct locomo_dev, dev)
+
+#define locomo_get_drvdata(d)	dev_get_drvdata(&(d)->dev)
+#define locomo_set_drvdata(d,p)	dev_set_drvdata(&(d)->dev, p)
+
+struct locomo_driver {
+	struct device_driver	drv;
+	unsigned int		devid;
+	int (*probe)(struct locomo_dev *);
+	int (*remove)(struct locomo_dev *);
+	int (*suspend)(struct locomo_dev *, pm_message_t);
+	int (*resume)(struct locomo_dev *);
+};
+
+#define LOCOMO_DRV(_d)	container_of((_d), struct locomo_driver, drv)
+
+#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
+
+void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+
+int locomo_driver_register(struct locomo_driver *);
+void locomo_driver_unregister(struct locomo_driver *);
+
+/* GPIO control functions */
+void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir);
+int locomo_gpio_read_level(struct device *dev, unsigned int bits);
+int locomo_gpio_read_output(struct device *dev, unsigned int bits);
+void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set);
+
+/* M62332 control function */
+void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel);
+
+/* Frontlight control */
+void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf);
+
+#endif
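An editorial usage sketch (not part of the patch): the exported GPIO helpers above take the bit masks built by LOCOMO_GPIO(); a child-device driver might raise one output roughly like this. The device pointer and the choice of LOCOMO_GPIO_LCD_MOD are assumptions for illustration, and direction setup is omitted.

/* Editorial sketch: drive one LoCoMo GPIO output high. */
static void example_locomo_gpio_high(struct locomo_dev *ldev)
{
	locomo_gpio_write(ldev->dev.parent, LOCOMO_GPIO_LCD_MOD, 1);
}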
diff --git a/arch/arm/include/asm/hardware/memc.h b/arch/arm/include/asm/hardware/memc.h
new file mode 100644
index 0000000..42ba7c1
--- /dev/null
+++ b/arch/arm/include/asm/hardware/memc.h
@@ -0,0 +1,26 @@
+/*
+ *  arch/arm/include/asm/hardware/memc.h
+ *
+ *  Copyright (C) Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define VDMA_ALIGNMENT	PAGE_SIZE
+#define VDMA_XFERSIZE	16
+#define VDMA_INIT	0
+#define VDMA_START	1
+#define VDMA_END	2
+
+#ifndef __ASSEMBLY__
+extern void memc_write(unsigned int reg, unsigned long val);
+
+#define video_set_dma(start,end,offset)				\
+do {								\
+	memc_write (VDMA_START, (start >> 2));			\
+	memc_write (VDMA_END, (end - VDMA_XFERSIZE) >> 2);	\
+	memc_write (VDMA_INIT, (offset >> 2));			\
+} while (0)
+
+#endif
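Editorial usage sketch (not part of the patch): video_set_dma() converts byte addresses into the MEMC's word-granular VDMA registers (note the >> 2 and the VDMA_XFERSIZE adjustment). A caller would hand it the physical start, end and initial offset of the screen buffer; the values here are placeholders.

/* Editorial sketch: point MEMC video DMA at a hypothetical frame buffer. */
static void example_memc_vdma(unsigned long fb_start, unsigned long fb_end)
{
	video_set_dma(fb_start, fb_end, fb_start);
}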
diff --git a/arch/arm/include/asm/hardware/pci_v3.h b/arch/arm/include/asm/hardware/pci_v3.h
new file mode 100644
index 0000000..2811c7e
--- /dev/null
+++ b/arch/arm/include/asm/hardware/pci_v3.h
@@ -0,0 +1,186 @@
+/*
+ *  arch/arm/include/asm/hardware/pci_v3.h
+ *
+ *  Internal header file PCI V3 chip
+ *
+ *  Copyright (C) ARM Limited
+ *  Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef ASM_ARM_HARDWARE_PCI_V3_H
+#define ASM_ARM_HARDWARE_PCI_V3_H
+
+/* -------------------------------------------------------------------------------
+ *  V3 Local Bus to PCI Bridge definitions
+ * -------------------------------------------------------------------------------
+ *  Registers (these are taken from page 129 of the EPC User's Manual, Rev 1.04).
+ *  All V3 register names are prefaced by V3_ to avoid clashing with any other
+ *  PCI definitions.  Their names match the user's manual.
+ *
+ *  I'm assuming that I2O is disabled.
+ *
+ */
+#define V3_PCI_VENDOR                   0x00000000
+#define V3_PCI_DEVICE                   0x00000002
+#define V3_PCI_CMD                      0x00000004
+#define V3_PCI_STAT                     0x00000006
+#define V3_PCI_CC_REV                   0x00000008
+#define V3_PCI_HDR_CFG                  0x0000000C
+#define V3_PCI_IO_BASE                  0x00000010
+#define V3_PCI_BASE0                    0x00000014
+#define V3_PCI_BASE1                    0x00000018
+#define V3_PCI_SUB_VENDOR               0x0000002C
+#define V3_PCI_SUB_ID                   0x0000002E
+#define V3_PCI_ROM                      0x00000030
+#define V3_PCI_BPARAM                   0x0000003C
+#define V3_PCI_MAP0                     0x00000040
+#define V3_PCI_MAP1                     0x00000044
+#define V3_PCI_INT_STAT                 0x00000048
+#define V3_PCI_INT_CFG                  0x0000004C 
+#define V3_LB_BASE0                     0x00000054
+#define V3_LB_BASE1                     0x00000058
+#define V3_LB_MAP0                      0x0000005E
+#define V3_LB_MAP1                      0x00000062
+#define V3_LB_BASE2                     0x00000064
+#define V3_LB_MAP2                      0x00000066
+#define V3_LB_SIZE                      0x00000068
+#define V3_LB_IO_BASE                   0x0000006E
+#define V3_FIFO_CFG                     0x00000070
+#define V3_FIFO_PRIORITY                0x00000072
+#define V3_FIFO_STAT                    0x00000074
+#define V3_LB_ISTAT                     0x00000076
+#define V3_LB_IMASK                     0x00000077
+#define V3_SYSTEM                       0x00000078
+#define V3_LB_CFG                       0x0000007A
+#define V3_PCI_CFG                      0x0000007C
+#define V3_DMA_PCI_ADR0                 0x00000080
+#define V3_DMA_PCI_ADR1                 0x00000090
+#define V3_DMA_LOCAL_ADR0               0x00000084
+#define V3_DMA_LOCAL_ADR1               0x00000094
+#define V3_DMA_LENGTH0                  0x00000088
+#define V3_DMA_LENGTH1                  0x00000098
+#define V3_DMA_CSR0                     0x0000008B
+#define V3_DMA_CSR1                     0x0000009B
+#define V3_DMA_CTLB_ADR0                0x0000008C
+#define V3_DMA_CTLB_ADR1                0x0000009C
+#define V3_DMA_DELAY                    0x000000E0
+#define V3_MAIL_DATA                    0x000000C0
+#define V3_PCI_MAIL_IEWR                0x000000D0
+#define V3_PCI_MAIL_IERD                0x000000D2
+#define V3_LB_MAIL_IEWR                 0x000000D4
+#define V3_LB_MAIL_IERD                 0x000000D6
+#define V3_MAIL_WR_STAT                 0x000000D8
+#define V3_MAIL_RD_STAT                 0x000000DA
+#define V3_QBA_MAP                      0x000000DC
+
+/*  PCI COMMAND REGISTER bits
+ */
+#define V3_COMMAND_M_FBB_EN             (1 << 9)
+#define V3_COMMAND_M_SERR_EN            (1 << 8)
+#define V3_COMMAND_M_PAR_EN             (1 << 6)
+#define V3_COMMAND_M_MASTER_EN          (1 << 2)
+#define V3_COMMAND_M_MEM_EN             (1 << 1)
+#define V3_COMMAND_M_IO_EN              (1 << 0)
+
+/*  SYSTEM REGISTER bits
+ */
+#define V3_SYSTEM_M_RST_OUT             (1 << 15)
+#define V3_SYSTEM_M_LOCK                (1 << 14)
+
+/*  PCI_CFG bits
+ */
+#define V3_PCI_CFG_M_I2O_EN		(1 << 15)
+#define V3_PCI_CFG_M_IO_REG_DIS		(1 << 14)
+#define V3_PCI_CFG_M_IO_DIS		(1 << 13)
+#define V3_PCI_CFG_M_EN3V		(1 << 12)
+#define V3_PCI_CFG_M_RETRY_EN           (1 << 10)
+#define V3_PCI_CFG_M_AD_LOW1            (1 << 9)
+#define V3_PCI_CFG_M_AD_LOW0            (1 << 8)
+
+/*  PCI_BASE register bits (PCI -> Local Bus)
+ */
+#define V3_PCI_BASE_M_ADR_BASE          0xFFF00000
+#define V3_PCI_BASE_M_ADR_BASEL         0x000FFF00
+#define V3_PCI_BASE_M_PREFETCH          (1 << 3)
+#define V3_PCI_BASE_M_TYPE              (3 << 1)
+#define V3_PCI_BASE_M_IO                (1 << 0)
+
+/*  PCI MAP register bits (PCI -> Local bus)
+ */
+#define V3_PCI_MAP_M_MAP_ADR            0xFFF00000
+#define V3_PCI_MAP_M_RD_POST_INH        (1 << 15)
+#define V3_PCI_MAP_M_ROM_SIZE           (3 << 10)
+#define V3_PCI_MAP_M_SWAP               (3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE           0x000000F0
+#define V3_PCI_MAP_M_REG_EN             (1 << 1)
+#define V3_PCI_MAP_M_ENABLE             (1 << 0)
+
+/*
+ *  LB_BASE0,1 register bits (Local bus -> PCI)
+ */
+#define V3_LB_BASE_ADR_BASE		0xfff00000
+#define V3_LB_BASE_SWAP			(3 << 8)
+#define V3_LB_BASE_ADR_SIZE		(15 << 4)
+#define V3_LB_BASE_PREFETCH		(1 << 3)
+#define V3_LB_BASE_ENABLE		(1 << 0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB		(0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB		(1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB		(2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB		(3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB	(4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB	(5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB	(6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB	(7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB	(8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB	(9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB		(10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB		(11 << 4)
+
+#define v3_addr_to_lb_base(a)	((a) & V3_LB_BASE_ADR_BASE)
+
+/*
+ *  LB_MAP0,1 register bits (Local bus -> PCI)
+ */
+#define V3_LB_MAP_MAP_ADR		0xfff0
+#define V3_LB_MAP_TYPE			(7 << 1)
+#define V3_LB_MAP_AD_LOW_EN		(1 << 0)
+
+#define V3_LB_MAP_TYPE_IACK		(0 << 1)
+#define V3_LB_MAP_TYPE_IO		(1 << 1)
+#define V3_LB_MAP_TYPE_MEM		(3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG		(5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE	(6 << 1)
+
+#define v3_addr_to_lb_map(a)	(((a) >> 16) & V3_LB_MAP_MAP_ADR)
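Editorial worked example (not part of the patch): an outgoing local-bus window is described by an LB_BASE value (base address | size | enable) paired with an LB_MAP value (PCI address bits | cycle type). For a 256 MB non-prefetchable memory window at local address 0x40000000 mapped 1:1 onto PCI memory space, the values compose as below; the window address and size are illustrative only.

/* Editorial sketch: LB_BASE0/LB_MAP0 values for a 256MB window at 0x40000000. */
enum {
	EXAMPLE_LB_BASE0 = v3_addr_to_lb_base(0x40000000) |
			   V3_LB_BASE_ADR_SIZE_256MB |
			   V3_LB_BASE_ENABLE,			/* 0x40000081 */
	EXAMPLE_LB_MAP0  = v3_addr_to_lb_map(0x40000000) |
			   V3_LB_MAP_TYPE_MEM,			/* 0x00004006 */
};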
+
+/*
+ *  LB_BASE2 register bits (Local bus -> PCI IO)
+ */
+#define V3_LB_BASE2_ADR_BASE		0xff00
+#define V3_LB_BASE2_SWAP		(3 << 6)
+#define V3_LB_BASE2_ENABLE		(1 << 0)
+
+#define v3_addr_to_lb_base2(a)	(((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/*
+ *  LB_MAP2 register bits (Local bus -> PCI IO)
+ */
+#define V3_LB_MAP2_MAP_ADR		0xff00
+
+#define v3_addr_to_lb_map2(a)	(((a) >> 16) & V3_LB_MAP2_MAP_ADR)
+
+#endif
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
new file mode 100644
index 0000000..6cf98d4
--- /dev/null
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -0,0 +1,581 @@
+/*
+ * arch/arm/include/asm/hardware/sa1111.h
+ *
+ * Copyright (C) 2000 John G Dorsey <john+@cs.cmu.edu>
+ *
+ * This file contains definitions for the SA-1111 Companion Chip.
+ * (Structure and naming borrowed from SA-1101.h, by Peter Danielsson.)
+ *
+ * Macro that calculates real address for registers in the SA-1111
+ */
+
+#ifndef _ASM_ARCH_SA1111
+#define _ASM_ARCH_SA1111
+
+#include <asm/arch/bitfield.h>
+
+/*
+ * The SA1111 is always located at virtual 0xf4000000, and is always
+ * "native" endian.
+ */
+
+#define SA1111_VBASE		0xf4000000
+
+/* Don't use these! */
+#define SA1111_p2v(x)           ((x) - SA1111_BASE + SA1111_VBASE)
+#define SA1111_v2p(x)           ((x) - SA1111_VBASE + SA1111_BASE)
+
+#ifndef __ASSEMBLY__
+#define _SA1111(x)	((x) + sa1111->resource.start)
+#endif
+
+#define sa1111_writel(val,addr)	__raw_writel(val, addr)
+#define sa1111_readl(addr)	__raw_readl(addr)
+
+/*
+ * 26 bits of the SA-1110 address bus are available to the SA-1111.
+ * Use these when feeding target addresses to the DMA engines.
+ */
+
+#define SA1111_ADDR_WIDTH	(26)
+#define SA1111_ADDR_MASK	((1<<SA1111_ADDR_WIDTH)-1)
+#define SA1111_DMA_ADDR(x)	((x)&SA1111_ADDR_MASK)
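Editorial worked example (not part of the patch): SA1111_DMA_ADDR() keeps only the low SA1111_ADDR_WIDTH (26) bits, so any higher address bits are dropped before the address is handed to the SAC DMA engines:

/* Editorial sketch: only bits [25:0] of the target address survive the mask. */
enum { EXAMPLE_SAC_DMA_ADDR = SA1111_DMA_ADDR(0xc0123456) };	/* == 0x00123456 */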
+
+/*
+ * Don't ask the (SAC) DMA engines to move less than this amount.
+ */
+
+#define SA1111_SAC_DMA_MIN_XFER	(0x800)
+
+/*
+ * System Bus Interface (SBI)
+ *
+ * Registers
+ *    SKCR	Control Register
+ *    SMCR	Shared Memory Controller Register
+ *    SKID	ID Register
+ */
+#define SA1111_SKCR	0x0000
+#define SA1111_SMCR	0x0004
+#define SA1111_SKID	0x0008
+
+#define SKCR_PLL_BYPASS	(1<<0)
+#define SKCR_RCLKEN	(1<<1)
+#define SKCR_SLEEP	(1<<2)
+#define SKCR_DOZE	(1<<3)
+#define SKCR_VCO_OFF	(1<<4)
+#define SKCR_SCANTSTEN	(1<<5)
+#define SKCR_CLKTSTEN	(1<<6)
+#define SKCR_RDYEN	(1<<7)
+#define SKCR_SELAC	(1<<8)
+#define SKCR_OPPC	(1<<9)
+#define SKCR_PLLTSTEN	(1<<10)
+#define SKCR_USBIOTSTEN	(1<<11)
+/*
+ * Don't believe the specs!  Take them, throw them outside.  Leave them
+ * there for a week.  Spit on them.  Walk on them.  Stamp on them.
+ * Pour gasoline over them and finally burn them.  Now think about coding.
+ *  - The October 1999 errata (278260-007) says it's bit 13, 1 to enable.
+ *  - The Feb 2001 errata (278260-010) says that the previous errata
+ *    (278260-009) is wrong, and it's actually bit 12, fixed in spec
+ *    278242-003.
+ *  - The SA1111 manual (278242) says bit 12, but 0 to enable.
+ *  - Reality is bit 13, 1 to enable.
+ *      -- rmk
+ */
+#define SKCR_OE_EN	(1<<13)
+
+#define SMCR_DTIM	(1<<0)
+#define SMCR_MBGE	(1<<1)
+#define SMCR_DRAC_0	(1<<2)
+#define SMCR_DRAC_1	(1<<3)
+#define SMCR_DRAC_2	(1<<4)
+#define SMCR_DRAC	Fld(3, 2)
+#define SMCR_CLAT	(1<<5)
+
+#define SKID_SIREV_MASK	(0x000000f0)
+#define SKID_MTREV_MASK (0x0000000f)
+#define SKID_ID_MASK	(0xffffff00)
+#define SKID_SA1111_ID	(0x690cc200)
+
+/*
+ * System Controller
+ *
+ * Registers
+ *    SKPCR	Power Control Register
+ *    SKCDR	Clock Divider Register
+ *    SKAUD	Audio Clock Divider Register
+ *    SKPMC	PS/2 Mouse Clock Divider Register
+ *    SKPTC	PS/2 Track Pad Clock Divider Register
+ *    SKPEN0	PWM0 Enable Register
+ *    SKPWM0	PWM0 Clock Register
+ *    SKPEN1	PWM1 Enable Register
+ *    SKPWM1	PWM1 Clock Register
+ */
+#define SA1111_SKPCR	0x0200
+#define SA1111_SKCDR	0x0204
+#define SA1111_SKAUD	0x0208
+#define SA1111_SKPMC	0x020c
+#define SA1111_SKPTC	0x0210
+#define SA1111_SKPEN0	0x0214
+#define SA1111_SKPWM0	0x0218
+#define SA1111_SKPEN1	0x021c
+#define SA1111_SKPWM1	0x0220
+
+#define SKPCR_UCLKEN	(1<<0)
+#define SKPCR_ACCLKEN	(1<<1)
+#define SKPCR_I2SCLKEN	(1<<2)
+#define SKPCR_L3CLKEN	(1<<3)
+#define SKPCR_SCLKEN	(1<<4)
+#define SKPCR_PMCLKEN	(1<<5)
+#define SKPCR_PTCLKEN	(1<<6)
+#define SKPCR_DCLKEN	(1<<7)
+#define SKPCR_PWMCLKEN	(1<<8)
+
+/*
+ * USB Host controller
+ */
+#define SA1111_USB		0x0400
+
+/*
+ * Offsets from SA1111_USB_BASE
+ */
+#define SA1111_USB_STATUS	0x0118
+#define SA1111_USB_RESET	0x011c
+#define SA1111_USB_IRQTEST	0x0120
+
+#define USB_RESET_FORCEIFRESET	(1 << 0)
+#define USB_RESET_FORCEHCRESET	(1 << 1)
+#define USB_RESET_CLKGENRESET	(1 << 2)
+#define USB_RESET_SIMSCALEDOWN	(1 << 3)
+#define USB_RESET_USBINTTEST	(1 << 4)
+#define USB_RESET_SLEEPSTBYEN	(1 << 5)
+#define USB_RESET_PWRSENSELOW	(1 << 6)
+#define USB_RESET_PWRCTRLLOW	(1 << 7)
+
+#define USB_STATUS_IRQHCIRMTWKUP  (1 <<  7)
+#define USB_STATUS_IRQHCIBUFFACC  (1 <<  8)
+#define USB_STATUS_NIRQHCIM       (1 <<  9)
+#define USB_STATUS_NHCIMFCLR      (1 << 10)
+#define USB_STATUS_USBPWRSENSE    (1 << 11)
+
+/*
+ * Serial Audio Controller
+ *
+ * Registers
+ *    SACR0             Serial Audio Common Control Register
+ *    SACR1             Serial Audio Alternate Mode (I2C/MSB) Control Register
+ *    SACR2             Serial Audio AC-link Control Register
+ *    SASR0             Serial Audio I2S/MSB Interface & FIFO Status Register
+ *    SASR1             Serial Audio AC-link Interface & FIFO Status Register
+ *    SASCR             Serial Audio Status Clear Register
+ *    L3_CAR            L3 Control Bus Address Register
+ *    L3_CDR            L3 Control Bus Data Register
+ *    ACCAR             AC-link Command Address Register
+ *    ACCDR             AC-link Command Data Register
+ *    ACSAR             AC-link Status Address Register
+ *    ACSDR             AC-link Status Data Register
+ *    SADTCS            Serial Audio DMA Transmit Control/Status Register
+ *    SADTSA            Serial Audio DMA Transmit Buffer Start Address A
+ *    SADTCA            Serial Audio DMA Transmit Buffer Count Register A
+ *    SADTSB            Serial Audio DMA Transmit Buffer Start Address B
+ *    SADTCB            Serial Audio DMA Transmit Buffer Count Register B
+ *    SADRCS            Serial Audio DMA Receive Control/Status Register
+ *    SADRSA            Serial Audio DMA Receive Buffer Start Address A
+ *    SADRCA            Serial Audio DMA Receive Buffer Count Register A
+ *    SADRSB            Serial Audio DMA Receive Buffer Start Address B
+ *    SADRCB            Serial Audio DMA Receive Buffer Count Register B
+ *    SAITR             Serial Audio Interrupt Test Register
+ *    SADR              Serial Audio Data Register (16 x 32-bit)
+ */
+
+#define SA1111_SERAUDIO		0x0600
+
+/*
+ * These are offsets from the above base.
+ */
+#define SA1111_SACR0		0x00
+#define SA1111_SACR1		0x04
+#define SA1111_SACR2		0x08
+#define SA1111_SASR0		0x0c
+#define SA1111_SASR1		0x10
+#define SA1111_SASCR		0x18
+#define SA1111_L3_CAR		0x1c
+#define SA1111_L3_CDR		0x20
+#define SA1111_ACCAR		0x24
+#define SA1111_ACCDR		0x28
+#define SA1111_ACSAR		0x2c
+#define SA1111_ACSDR		0x30
+#define SA1111_SADTCS		0x34
+#define SA1111_SADTSA		0x38
+#define SA1111_SADTCA		0x3c
+#define SA1111_SADTSB		0x40
+#define SA1111_SADTCB		0x44
+#define SA1111_SADRCS		0x48
+#define SA1111_SADRSA		0x4c
+#define SA1111_SADRCA		0x50
+#define SA1111_SADRSB		0x54
+#define SA1111_SADRCB		0x58
+#define SA1111_SAITR		0x5c
+#define SA1111_SADR		0x80
+
+#ifndef CONFIG_ARCH_PXA
+
+#define SACR0_ENB	(1<<0)
+#define SACR0_BCKD	(1<<2)
+#define SACR0_RST	(1<<3)
+
+#define SACR1_AMSL	(1<<0)
+#define SACR1_L3EN	(1<<1)
+#define SACR1_L3MB	(1<<2)
+#define SACR1_DREC	(1<<3)
+#define SACR1_DRPL	(1<<4)
+#define SACR1_ENLBF	(1<<5)
+
+#define SACR2_TS3V	(1<<0)
+#define SACR2_TS4V	(1<<1)
+#define SACR2_WKUP	(1<<2)
+#define SACR2_DREC	(1<<3)
+#define SACR2_DRPL	(1<<4)
+#define SACR2_ENLBF	(1<<5)
+#define SACR2_RESET	(1<<6)
+
+#define SASR0_TNF	(1<<0)
+#define SASR0_RNE	(1<<1)
+#define SASR0_BSY	(1<<2)
+#define SASR0_TFS	(1<<3)
+#define SASR0_RFS	(1<<4)
+#define SASR0_TUR	(1<<5)
+#define SASR0_ROR	(1<<6)
+#define SASR0_L3WD	(1<<16)
+#define SASR0_L3RD	(1<<17)
+
+#define SASR1_TNF	(1<<0)
+#define SASR1_RNE	(1<<1)
+#define SASR1_BSY	(1<<2)
+#define SASR1_TFS	(1<<3)
+#define SASR1_RFS	(1<<4)
+#define SASR1_TUR	(1<<5)
+#define SASR1_ROR	(1<<6)
+#define SASR1_CADT	(1<<16)
+#define SASR1_SADR	(1<<17)
+#define SASR1_RSTO	(1<<18)
+#define SASR1_CLPM	(1<<19)
+#define SASR1_CRDY	(1<<20)
+#define SASR1_RS3V	(1<<21)
+#define SASR1_RS4V	(1<<22)
+
+#define SASCR_TUR	(1<<5)
+#define SASCR_ROR	(1<<6)
+#define SASCR_DTS	(1<<16)
+#define SASCR_RDD	(1<<17)
+#define SASCR_STO	(1<<18)
+
+#define SADTCS_TDEN	(1<<0)
+#define SADTCS_TDIE	(1<<1)
+#define SADTCS_TDBDA	(1<<3)
+#define SADTCS_TDSTA	(1<<4)
+#define SADTCS_TDBDB	(1<<5)
+#define SADTCS_TDSTB	(1<<6)
+#define SADTCS_TBIU	(1<<7)
+
+#define SADRCS_RDEN	(1<<0)
+#define SADRCS_RDIE	(1<<1)
+#define SADRCS_RDBDA	(1<<3)
+#define SADRCS_RDSTA	(1<<4)
+#define SADRCS_RDBDB	(1<<5)
+#define SADRCS_RDSTB	(1<<6)
+#define SADRCS_RBIU	(1<<7)
+
+#define SAD_CS_DEN	(1<<0)
+#define SAD_CS_DIE	(1<<1)	/* Not functional on metal 1 */
+#define SAD_CS_DBDA	(1<<3)	/* Not functional on metal 1 */
+#define SAD_CS_DSTA	(1<<4)
+#define SAD_CS_DBDB	(1<<5)	/* Not functional on metal 1 */
+#define SAD_CS_DSTB	(1<<6)
+#define SAD_CS_BIU	(1<<7)	/* Not functional on metal 1 */
+
+#define SAITR_TFS	(1<<0)
+#define SAITR_RFS	(1<<1)
+#define SAITR_TUR	(1<<2)
+#define SAITR_ROR	(1<<3)
+#define SAITR_CADT	(1<<4)
+#define SAITR_SADR	(1<<5)
+#define SAITR_RSTO	(1<<6)
+#define SAITR_TDBDA	(1<<8)
+#define SAITR_TDBDB	(1<<9)
+#define SAITR_RDBDA	(1<<10)
+#define SAITR_RDBDB	(1<<11)
+
+#endif  /* !CONFIG_ARCH_PXA */
+
+/*
+ * General-Purpose I/O Interface
+ *
+ * Registers
+ *    PA_DDR		GPIO Block A Data Direction
+ *    PA_DRR/PA_DWR	GPIO Block A Data Value Register (read/write)
+ *    PA_SDR		GPIO Block A Sleep Direction
+ *    PA_SSR		GPIO Block A Sleep State
+ *    PB_DDR		GPIO Block B Data Direction
+ *    PB_DRR/PB_DWR	GPIO Block B Data Value Register (read/write)
+ *    PB_SDR		GPIO Block B Sleep Direction
+ *    PB_SSR		GPIO Block B Sleep State
+ *    PC_DDR		GPIO Block C Data Direction
+ *    PC_DRR/PC_DWR	GPIO Block C Data Value Register (read/write)
+ *    PC_SDR		GPIO Block C Sleep Direction
+ *    PC_SSR		GPIO Block C Sleep State
+ */
+
+#define _PA_DDR		_SA1111( 0x1000 )
+#define _PA_DRR		_SA1111( 0x1004 )
+#define _PA_DWR		_SA1111( 0x1004 )
+#define _PA_SDR		_SA1111( 0x1008 )
+#define _PA_SSR		_SA1111( 0x100c )
+#define _PB_DDR		_SA1111( 0x1010 )
+#define _PB_DRR		_SA1111( 0x1014 )
+#define _PB_DWR		_SA1111( 0x1014 )
+#define _PB_SDR		_SA1111( 0x1018 )
+#define _PB_SSR		_SA1111( 0x101c )
+#define _PC_DDR		_SA1111( 0x1020 )
+#define _PC_DRR		_SA1111( 0x1024 )
+#define _PC_DWR		_SA1111( 0x1024 )
+#define _PC_SDR		_SA1111( 0x1028 )
+#define _PC_SSR		_SA1111( 0x102c )
+
+#define SA1111_GPIO	0x1000
+
+#define SA1111_GPIO_PADDR	(0x000)
+#define SA1111_GPIO_PADRR	(0x004)
+#define SA1111_GPIO_PADWR	(0x004)
+#define SA1111_GPIO_PASDR	(0x008)
+#define SA1111_GPIO_PASSR	(0x00c)
+#define SA1111_GPIO_PBDDR	(0x010)
+#define SA1111_GPIO_PBDRR	(0x014)
+#define SA1111_GPIO_PBDWR	(0x014)
+#define SA1111_GPIO_PBSDR	(0x018)
+#define SA1111_GPIO_PBSSR	(0x01c)
+#define SA1111_GPIO_PCDDR	(0x020)
+#define SA1111_GPIO_PCDRR	(0x024)
+#define SA1111_GPIO_PCDWR	(0x024)
+#define SA1111_GPIO_PCSDR	(0x028)
+#define SA1111_GPIO_PCSSR	(0x02c)
+
+#define GPIO_A0		(1 << 0)
+#define GPIO_A1		(1 << 1)
+#define GPIO_A2		(1 << 2)
+#define GPIO_A3		(1 << 3)
+
+#define GPIO_B0		(1 << 8)
+#define GPIO_B1		(1 << 9)
+#define GPIO_B2		(1 << 10)
+#define GPIO_B3		(1 << 11)
+#define GPIO_B4		(1 << 12)
+#define GPIO_B5		(1 << 13)
+#define GPIO_B6		(1 << 14)
+#define GPIO_B7		(1 << 15)
+
+#define GPIO_C0		(1 << 16)
+#define GPIO_C1		(1 << 17)
+#define GPIO_C2		(1 << 18)
+#define GPIO_C3		(1 << 19)
+#define GPIO_C4		(1 << 20)
+#define GPIO_C5		(1 << 21)
+#define GPIO_C6		(1 << 22)
+#define GPIO_C7		(1 << 23)
+
+/*
+ * Interrupt Controller
+ *
+ * Registers
+ *    INTTEST0		Test register 0
+ *    INTTEST1		Test register 1
+ *    INTEN0		Interrupt Enable register 0
+ *    INTEN1		Interrupt Enable register 1
+ *    INTPOL0		Interrupt Polarity selection 0
+ *    INTPOL1		Interrupt Polarity selection 1
+ *    INTTSTSEL		Interrupt source selection
+ *    INTSTATCLR0	Interrupt Status/Clear 0
+ *    INTSTATCLR1	Interrupt Status/Clear 1
+ *    INTSET0		Interrupt source set 0
+ *    INTSET1		Interrupt source set 1
+ *    WAKE_EN0		Wake-up source enable 0
+ *    WAKE_EN1		Wake-up source enable 1
+ *    WAKE_POL0		Wake-up polarity selection 0
+ *    WAKE_POL1		Wake-up polarity selection 1
+ */
+#define SA1111_INTC		0x1600
+
+/*
+ * These are offsets from the above base.
+ */
+#define SA1111_INTTEST0		0x0000
+#define SA1111_INTTEST1		0x0004
+#define SA1111_INTEN0		0x0008
+#define SA1111_INTEN1		0x000c
+#define SA1111_INTPOL0		0x0010
+#define SA1111_INTPOL1		0x0014
+#define SA1111_INTTSTSEL	0x0018
+#define SA1111_INTSTATCLR0	0x001c
+#define SA1111_INTSTATCLR1	0x0020
+#define SA1111_INTSET0		0x0024
+#define SA1111_INTSET1		0x0028
+#define SA1111_WAKEEN0		0x002c
+#define SA1111_WAKEEN1		0x0030
+#define SA1111_WAKEPOL0		0x0034
+#define SA1111_WAKEPOL1		0x0038
+
+/*
+ * PS/2 Trackpad and Mouse Interfaces
+ *
+ * Registers
+ *    PS2CR		Control Register
+ *    PS2STAT		Status Register
+ *    PS2DATA		Transmit/Receive Data register
+ *    PS2CLKDIV		Clock Division Register
+ *    PS2PRECNT		Clock Precount Register
+ *    PS2TEST1		Test register 1
+ *    PS2TEST2		Test register 2
+ *    PS2TEST3		Test register 3
+ *    PS2TEST4		Test register 4
+ */
+
+#define SA1111_KBD		0x0a00
+#define SA1111_MSE		0x0c00
+
+/*
+ * These are offsets from the above bases.
+ */
+#define SA1111_PS2CR		0x0000
+#define SA1111_PS2STAT		0x0004
+#define SA1111_PS2DATA		0x0008
+#define SA1111_PS2CLKDIV	0x000c
+#define SA1111_PS2PRECNT	0x0010
+
+#define PS2CR_ENA		0x08
+#define PS2CR_FKD		0x02
+#define PS2CR_FKC		0x01
+
+#define PS2STAT_STP		0x0100
+#define PS2STAT_TXE		0x0080
+#define PS2STAT_TXB		0x0040
+#define PS2STAT_RXF		0x0020
+#define PS2STAT_RXB		0x0010
+#define PS2STAT_ENA		0x0008
+#define PS2STAT_RXP		0x0004
+#define PS2STAT_KBD		0x0002
+#define PS2STAT_KBC		0x0001
+
+/*
+ * PCMCIA Interface
+ *
+ * Registers
+ *    PCSR	Status Register
+ *    PCCR	Control Register
+ *    PCSSR	Sleep State Register
+ */
+
+#define SA1111_PCMCIA	0x1600
+
+/*
+ * These are offsets from the above base.
+ */
+#define SA1111_PCCR	0x0000
+#define SA1111_PCSSR	0x0004
+#define SA1111_PCSR	0x0008
+
+#define PCSR_S0_READY	(1<<0)
+#define PCSR_S1_READY	(1<<1)
+#define PCSR_S0_DETECT	(1<<2)
+#define PCSR_S1_DETECT	(1<<3)
+#define PCSR_S0_VS1	(1<<4)
+#define PCSR_S0_VS2	(1<<5)
+#define PCSR_S1_VS1	(1<<6)
+#define PCSR_S1_VS2	(1<<7)
+#define PCSR_S0_WP	(1<<8)
+#define PCSR_S1_WP	(1<<9)
+#define PCSR_S0_BVD1	(1<<10)
+#define PCSR_S0_BVD2	(1<<11)
+#define PCSR_S1_BVD1	(1<<12)
+#define PCSR_S1_BVD2	(1<<13)
+
+#define PCCR_S0_RST	(1<<0)
+#define PCCR_S1_RST	(1<<1)
+#define PCCR_S0_FLT	(1<<2)
+#define PCCR_S1_FLT	(1<<3)
+#define PCCR_S0_PWAITEN	(1<<4)
+#define PCCR_S1_PWAITEN	(1<<5)
+#define PCCR_S0_PSE	(1<<6)
+#define PCCR_S1_PSE	(1<<7)
+
+#define PCSSR_S0_SLEEP	(1<<0)
+#define PCSSR_S1_SLEEP	(1<<1)
+
+
+
+
+extern struct bus_type sa1111_bus_type;
+
+#define SA1111_DEVID_SBI	0
+#define SA1111_DEVID_SK		1
+#define SA1111_DEVID_USB	2
+#define SA1111_DEVID_SAC	3
+#define SA1111_DEVID_SSP	4
+#define SA1111_DEVID_PS2	5
+#define SA1111_DEVID_GPIO	6
+#define SA1111_DEVID_INT	7
+#define SA1111_DEVID_PCMCIA	8
+
+struct sa1111_dev {
+	struct device	dev;
+	unsigned int	devid;
+	struct resource	res;
+	void __iomem	*mapbase;
+	unsigned int	skpcr_mask;
+	unsigned int	irq[6];
+	u64		dma_mask;
+};
+
+#define SA1111_DEV(_d)	container_of((_d), struct sa1111_dev, dev)
+
+#define sa1111_get_drvdata(d)	dev_get_drvdata(&(d)->dev)
+#define sa1111_set_drvdata(d,p)	dev_set_drvdata(&(d)->dev, p)
+
+struct sa1111_driver {
+	struct device_driver	drv;
+	unsigned int		devid;
+	int (*probe)(struct sa1111_dev *);
+	int (*remove)(struct sa1111_dev *);
+	int (*suspend)(struct sa1111_dev *, pm_message_t);
+	int (*resume)(struct sa1111_dev *);
+};
+
+#define SA1111_DRV(_d)	container_of((_d), struct sa1111_driver, drv)
+
+#define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)
+
+/*
+ * These frob the SKPCR register.
+ */
+void sa1111_enable_device(struct sa1111_dev *);
+void sa1111_disable_device(struct sa1111_dev *);
+
+unsigned int sa1111_pll_clock(struct sa1111_dev *);
+
+#define SA1111_AUDIO_ACLINK	0
+#define SA1111_AUDIO_I2S	1
+
+void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode);
+int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate);
+int sa1111_get_audio_rate(struct sa1111_dev *sadev);
+
+int sa1111_check_dma_bug(dma_addr_t addr);
+
+int sa1111_driver_register(struct sa1111_driver *);
+void sa1111_driver_unregister(struct sa1111_driver *);
+
+void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int dir, unsigned int sleep_dir);
+void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
+void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v);
+
+#endif  /* _ASM_ARCH_SA1111 */
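For reference, a minimal sketch of how a sub-device driver would hook into the bus interface declared above; "sa1111-example" and the callbacks are illustrative placeholders, not an in-tree driver.

/* Hypothetical SA1111 sub-device driver skeleton using the API above. */
static int example_probe(struct sa1111_dev *sadev)
{
	sa1111_enable_device(sadev);	/* ungate this block's clock via SKPCR */
	return 0;
}

static int example_remove(struct sa1111_dev *sadev)
{
	sa1111_disable_device(sadev);
	return 0;
}

static struct sa1111_driver example_driver = {
	.drv = {
		.name = "sa1111-example",
	},
	.devid	= SA1111_DEVID_PS2,	/* bind to the PS/2 block, for example */
	.probe	= example_probe,
	.remove	= example_remove,
};

/* registered from module or machine init code:
 *	sa1111_driver_register(&example_driver);
 */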
diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h
new file mode 100644
index 0000000..dfb8330
--- /dev/null
+++ b/arch/arm/include/asm/hardware/scoop.h
@@ -0,0 +1,69 @@
+/*
+ *  Definitions for the SCOOP interface found on various Sharp PDAs
+ *
+ *  Copyright (c) 2004 Richard Purdie
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#define SCOOP_MCR  0x00
+#define SCOOP_CDR  0x04
+#define SCOOP_CSR  0x08
+#define SCOOP_CPR  0x0C
+#define SCOOP_CCR  0x10
+#define SCOOP_IRR  0x14
+#define SCOOP_IRM  0x14
+#define SCOOP_IMR  0x18
+#define SCOOP_ISR  0x1C
+#define SCOOP_GPCR 0x20
+#define SCOOP_GPWR 0x24
+#define SCOOP_GPRR 0x28
+
+#define SCOOP_GPCR_PA22	( 1 << 12 )
+#define SCOOP_GPCR_PA21	( 1 << 11 )
+#define SCOOP_GPCR_PA20	( 1 << 10 )
+#define SCOOP_GPCR_PA19	( 1 << 9 )
+#define SCOOP_GPCR_PA18	( 1 << 8 )
+#define SCOOP_GPCR_PA17	( 1 << 7 )
+#define SCOOP_GPCR_PA16	( 1 << 6 )
+#define SCOOP_GPCR_PA15	( 1 << 5 )
+#define SCOOP_GPCR_PA14	( 1 << 4 )
+#define SCOOP_GPCR_PA13	( 1 << 3 )
+#define SCOOP_GPCR_PA12	( 1 << 2 )
+#define SCOOP_GPCR_PA11	( 1 << 1 )
+
+struct scoop_config {
+	unsigned short io_out;
+	unsigned short io_dir;
+	unsigned short suspend_clr;
+	unsigned short suspend_set;
+	int gpio_base;
+};
+
+/* Structure for linking scoop devices to PCMCIA sockets */
+struct scoop_pcmcia_dev {
+	struct device *dev;     /* Pointer to this socket's scoop device */
+	int	irq;                /* irq for socket */
+	int cd_irq;
+	const char *cd_irq_str;
+	unsigned char keep_vs;
+	unsigned char keep_rd;
+};
+
+struct scoop_pcmcia_config {
+	struct scoop_pcmcia_dev *devs;
+	int num_devs;
+	void (*pcmcia_init)(void);
+	void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);
+};
+
+extern struct scoop_pcmcia_config *platform_scoop_config;
+
+void reset_scoop(struct device *dev);
+unsigned short __deprecated set_scoop_gpio(struct device *dev, unsigned short bit);
+unsigned short __deprecated reset_scoop_gpio(struct device *dev, unsigned short bit);
+unsigned short read_scoop_reg(struct device *dev, unsigned short reg);
+void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data);
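As a hedged illustration of the accessor pair declared above; the device pointer comes from the board's SCOOP platform device, and the choice of PA19 is arbitrary.

/* Illustrative only: drive SCOOP GPIO PA19 high via the register accessors. */
static void example_scoop_pa19_high(struct device *scoop)
{
	unsigned short gpcr, gpwr;

	gpcr = read_scoop_reg(scoop, SCOOP_GPCR);	/* direction/control register */
	write_scoop_reg(scoop, SCOOP_GPCR, gpcr | SCOOP_GPCR_PA19);

	gpwr = read_scoop_reg(scoop, SCOOP_GPWR);	/* output data register */
	write_scoop_reg(scoop, SCOOP_GPWR, gpwr | SCOOP_GPCR_PA19);
}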
diff --git a/arch/arm/include/asm/hardware/sharpsl_pm.h b/arch/arm/include/asm/hardware/sharpsl_pm.h
new file mode 100644
index 0000000..2d00db2
--- /dev/null
+++ b/arch/arm/include/asm/hardware/sharpsl_pm.h
@@ -0,0 +1,106 @@
+/*
+ * SharpSL Battery/PM Driver
+ *
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+
+struct sharpsl_charger_machinfo {
+	void (*init)(void);
+	void (*exit)(void);
+	int gpio_acin;
+	int gpio_batfull;
+	int batfull_irq;
+	int gpio_batlock;
+	int gpio_fatal;
+	void (*discharge)(int);
+	void (*discharge1)(int);
+	void (*charge)(int);
+	void (*measure_temp)(int);
+	void (*presuspend)(void);
+	void (*postsuspend)(void);
+	void (*earlyresume)(void);
+	unsigned long (*read_devdata)(int);
+#define SHARPSL_BATT_VOLT       1
+#define SHARPSL_BATT_TEMP       2
+#define SHARPSL_ACIN_VOLT       3
+#define SHARPSL_STATUS_ACIN     4
+#define SHARPSL_STATUS_LOCK     5
+#define SHARPSL_STATUS_CHRGFULL 6
+#define SHARPSL_STATUS_FATAL    7
+	unsigned long (*charger_wakeup)(void);
+	int (*should_wakeup)(unsigned int resume_on_alarm);
+	void (*backlight_limit)(int);
+	int (*backlight_get_status) (void);
+	int charge_on_volt;
+	int charge_on_temp;
+	int charge_acin_high;
+	int charge_acin_low;
+	int fatal_acin_volt;
+	int fatal_noacin_volt;
+	int bat_levels;
+	struct battery_thresh *bat_levels_noac;
+	struct battery_thresh *bat_levels_acin;
+	struct battery_thresh *bat_levels_noac_bl;
+	struct battery_thresh *bat_levels_acin_bl;
+	int status_high_acin;
+	int status_low_acin;
+	int status_high_noac;
+	int status_low_noac;
+};
+
+struct battery_thresh {
+	int voltage;
+	int percentage;
+};
+
+struct battery_stat {
+	int ac_status;         /* APM AC Present/Not Present */
+	int mainbat_status;    /* APM Main Battery Status */
+	int mainbat_percent;   /* Main Battery Percentage Charge */
+	int mainbat_voltage;   /* Main Battery Voltage */
+};
+
+struct sharpsl_pm_status {
+	struct device *dev;
+	struct timer_list ac_timer;
+	struct timer_list chrg_full_timer;
+
+	int charge_mode;
+#define CHRG_ERROR    (-1)
+#define CHRG_OFF      (0)
+#define CHRG_ON       (1)
+#define CHRG_DONE     (2)
+
+	unsigned int flags;
+#define SHARPSL_SUSPENDED       (1 << 0)  /* Device is Suspended */
+#define SHARPSL_ALARM_ACTIVE    (1 << 1)  /* Alarm is for charging event (not user) */
+#define SHARPSL_BL_LIMIT        (1 << 2)  /* Backlight Intensity Limited */
+#define SHARPSL_APM_QUEUED      (1 << 3)  /* APM Event Queued */
+#define SHARPSL_DO_OFFLINE_CHRG (1 << 4)  /* Trigger the offline charger */
+
+	int full_count;
+	unsigned long charge_start_time;
+	struct sharpsl_charger_machinfo *machinfo;
+	struct battery_stat battstat;
+};
+
+extern struct sharpsl_pm_status sharpsl_pm;
+
+
+#define SHARPSL_LED_ERROR  2
+#define SHARPSL_LED_ON     1
+#define SHARPSL_LED_OFF    0
+
+void sharpsl_battery_kick(void);
+void sharpsl_pm_led(int val);
+irqreturn_t sharpsl_ac_isr(int irq, void *dev_id);
+irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id);
+irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id);
+
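A hedged sketch of how a board file might fill in the read_devdata() hook; the constant return values stand in for real ADC/GPIO reads, and only a couple of machinfo fields are shown.

/* Hypothetical board-level read_devdata() implementation. */
static unsigned long example_read_devdata(int type)
{
	switch (type) {
	case SHARPSL_BATT_VOLT:
		return 1200;	/* placeholder: would come from the board's ADC */
	case SHARPSL_STATUS_ACIN:
		return 1;	/* placeholder: would come from a GPIO read */
	default:
		return 0;
	}
}

static struct sharpsl_charger_machinfo example_machinfo = {
	.read_devdata	= example_read_devdata,
	.bat_levels	= 40,
	/* remaining thresholds, tables and hooks omitted */
};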
diff --git a/arch/arm/include/asm/hardware/ssp.h b/arch/arm/include/asm/hardware/ssp.h
new file mode 100644
index 0000000..3b42e18
--- /dev/null
+++ b/arch/arm/include/asm/hardware/ssp.h
@@ -0,0 +1,28 @@
+/*
+ *  ssp.h
+ *
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SSP_H
+#define SSP_H
+
+struct ssp_state {
+	unsigned int	cr0;
+	unsigned int	cr1;
+};
+
+int ssp_write_word(u16 data);
+int ssp_read_word(u16 *data);
+int ssp_flush(void);
+void ssp_enable(void);
+void ssp_disable(void);
+void ssp_save_state(struct ssp_state *ssp);
+void ssp_restore_state(struct ssp_state *ssp);
+int ssp_init(void);
+void ssp_exit(void);
+
+#endif
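A minimal, hedged example of a transfer built from the calls declared here; error handling is trimmed to the essentials.

/* Illustrative SSP round trip. */
static int example_ssp_xfer(u16 out, u16 *in)
{
	int ret;

	ret = ssp_init();		/* claim and set up the SSP port */
	if (ret)
		return ret;

	ssp_enable();
	ret = ssp_write_word(out);	/* clock one word out... */
	if (ret == 0)
		ret = ssp_read_word(in);	/* ...and read the word clocked back */
	ssp_flush();
	ssp_disable();
	ssp_exit();

	return ret;
}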
diff --git a/arch/arm/include/asm/hardware/uengine.h b/arch/arm/include/asm/hardware/uengine.h
new file mode 100644
index 0000000..b442d65
--- /dev/null
+++ b/arch/arm/include/asm/hardware/uengine.h
@@ -0,0 +1,62 @@
+/*
+ * Generic library functions for the microengines found on the Intel
+ * IXP2000 series of network processors.
+ *
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of the
+ * License, or (at your option) any later version.
+ */
+
+#ifndef __IXP2000_UENGINE_H
+#define __IXP2000_UENGINE_H
+
+extern u32 ixp2000_uengine_mask;
+
+struct ixp2000_uengine_code
+{
+	u32	cpu_model_bitmask;
+	u8	cpu_min_revision;
+	u8	cpu_max_revision;
+
+	u32	uengine_parameters;
+
+	struct ixp2000_reg_value {
+		int	reg;
+		u32	value;
+	} *initial_reg_values;
+
+	int	num_insns;
+	u8	*insns;
+};
+
+u32 ixp2000_uengine_csr_read(int uengine, int offset);
+void ixp2000_uengine_csr_write(int uengine, int offset, u32 value);
+void ixp2000_uengine_reset(u32 uengine_mask);
+void ixp2000_uengine_set_mode(int uengine, u32 mode);
+void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns);
+void ixp2000_uengine_init_context(int uengine, int context, int pc);
+void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask);
+void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask);
+int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c);
+
+#define IXP2000_UENGINE_8_CONTEXTS		0x00000000
+#define IXP2000_UENGINE_4_CONTEXTS		0x80000000
+#define IXP2000_UENGINE_PRN_UPDATE_EVERY	0x40000000
+#define IXP2000_UENGINE_PRN_UPDATE_ON_ACCESS	0x00000000
+#define IXP2000_UENGINE_NN_FROM_SELF		0x00100000
+#define IXP2000_UENGINE_NN_FROM_PREVIOUS	0x00000000
+#define IXP2000_UENGINE_ASSERT_EMPTY_AT_3	0x000c0000
+#define IXP2000_UENGINE_ASSERT_EMPTY_AT_2	0x00080000
+#define IXP2000_UENGINE_ASSERT_EMPTY_AT_1	0x00040000
+#define IXP2000_UENGINE_ASSERT_EMPTY_AT_0	0x00000000
+#define IXP2000_UENGINE_LM_ADDR1_GLOBAL		0x00020000
+#define IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT	0x00000000
+#define IXP2000_UENGINE_LM_ADDR0_GLOBAL		0x00010000
+#define IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT	0x00000000
+
+
+#endif
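A hedged sketch of describing and loading a microengine image with this interface; the microcode bytes and counts below are placeholders for what a real microcode assembler would produce.

/* Hypothetical microengine image descriptor. */
static u8 example_insns[] = {
	0x00,	/* placeholder: real entries come from the microcode assembler */
};

static struct ixp2000_uengine_code example_code = {
	.cpu_model_bitmask	= 0xffffffff,		/* accept any IXP2000 variant */
	.cpu_min_revision	= 0,
	.cpu_max_revision	= 255,
	.uengine_parameters	= IXP2000_UENGINE_8_CONTEXTS |
				  IXP2000_UENGINE_PRN_UPDATE_EVERY,
	.num_insns		= 0,			/* would be the real instruction count */
	.insns			= example_insns,
};

/* Then, for a given microengine number:
 *	err = ixp2000_uengine_load(uengine, &example_code);
 *	if (!err)
 *		ixp2000_uengine_start_contexts(uengine, 0xff);
 */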
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
new file mode 100644
index 0000000..263f2c3
--- /dev/null
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -0,0 +1,45 @@
+/*
+ *  arch/arm/include/asm/hardware/vic.h
+ *
+ *  Copyright (c) ARM Limited 2003.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_HARDWARE_VIC_H
+#define __ASM_ARM_HARDWARE_VIC_H
+
+#define VIC_IRQ_STATUS			0x00
+#define VIC_FIQ_STATUS			0x04
+#define VIC_RAW_STATUS			0x08
+#define VIC_INT_SELECT			0x0c	/* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_ENABLE			0x10	/* 1 = enable, 0 = disable */
+#define VIC_INT_ENABLE_CLEAR		0x14
+#define VIC_INT_SOFT			0x18
+#define VIC_INT_SOFT_CLEAR		0x1c
+#define VIC_PROTECT			0x20
+#define VIC_VECT_ADDR			0x30
+#define VIC_DEF_VECT_ADDR		0x34
+
+#define VIC_VECT_ADDR0			0x100	/* 0 to 15 */
+#define VIC_VECT_CNTL0			0x200	/* 0 to 15 */
+#define VIC_ITCR			0x300	/* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE		(1 << 5)
+
+#ifndef __ASSEMBLY__
+void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources);
+#endif
+
+#endif
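For illustration, a typical call from a platform's init_irq hook might look like this; VA_VIC_BASE and IRQ_VIC_START are platform-specific placeholders.

/* Illustrative VIC setup: route all 32 sources starting at IRQ_VIC_START. */
static void __init example_init_irq(void)
{
	vic_init((void __iomem *)VA_VIC_BASE, IRQ_VIC_START, ~0);
}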
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
new file mode 100644
index 0000000..f1a08a5
--- /dev/null
+++ b/arch/arm/include/asm/hw_irq.h
@@ -0,0 +1,9 @@
+/*
+ * Nothing to see here yet
+ */
+#ifndef _ARCH_ARM_HW_IRQ_H
+#define _ARCH_ARM_HW_IRQ_H
+
+#include <asm/mach/irq.h>
+
+#endif
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
new file mode 100644
index 0000000..81f4c89
--- /dev/null
+++ b/arch/arm/include/asm/hwcap.h
@@ -0,0 +1,29 @@
+#ifndef __ASMARM_HWCAP_H
+#define __ASMARM_HWCAP_H
+
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP	1
+#define HWCAP_HALF	2
+#define HWCAP_THUMB	4
+#define HWCAP_26BIT	8	/* Play it safe */
+#define HWCAP_FAST_MULT	16
+#define HWCAP_FPA	32
+#define HWCAP_VFP	64
+#define HWCAP_EDSP	128
+#define HWCAP_JAVA	256
+#define HWCAP_IWMMXT	512
+#define HWCAP_CRUNCH	1024
+#define HWCAP_THUMBEE	2048
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP	(elf_hwcap)
+extern unsigned int elf_hwcap;
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/ide.h b/arch/arm/include/asm/ide.h
new file mode 100644
index 0000000..b507ce8
--- /dev/null
+++ b/arch/arm/include/asm/ide.h
@@ -0,0 +1,23 @@
+/*
+ *  arch/arm/include/asm/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+/*
+ *  This file contains the ARM architecture specific IDE code.
+ */
+
+#ifndef __ASMARM_IDE_H
+#define __ASMARM_IDE_H
+
+#ifdef __KERNEL__
+
+#define __ide_mm_insw(port,addr,len)	readsw(port,addr,len)
+#define __ide_mm_insl(port,addr,len)	readsl(port,addr,len)
+#define __ide_mm_outsw(port,addr,len)	writesw(port,addr,len)
+#define __ide_mm_outsl(port,addr,len)	writesl(port,addr,len)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMARM_IDE_H */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
new file mode 100644
index 0000000..ffe07c0
--- /dev/null
+++ b/arch/arm/include/asm/io.h
@@ -0,0 +1,287 @@
+/*
+ *  arch/arm/include/asm/io.h
+ *
+ *  Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Modifications:
+ *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
+ *			constant addresses and variable addresses.
+ *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
+ *			specific IO header files.
+ *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
+ *  04-Apr-1999	PJB	Added check_signature.
+ *  12-Dec-1999	RMK	More cleanups
+ *  18-Jun-2000 RMK	Removed virt_to_* and friends definitions
+ *  05-Oct-2004 BJD     Moved memory string functions to use void __iomem
+ */
+#ifndef __ASM_ARM_IO_H
+#define __ASM_ARM_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/memory.h>
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * Generic IO read/write.  These perform native-endian accesses.  Note
+ * that some architectures will want to re-define __raw_{read,write}w.
+ */
+extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
+extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
+extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
+
+extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
+extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
+extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+
+#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v))
+#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
+#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v))
+
+#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a))
+#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a))
+
+/*
+ * Architecture ioremap implementation.
+ */
+#define MT_DEVICE		0
+#define MT_DEVICE_NONSHARED	1
+#define MT_DEVICE_CACHED	2
+#define MT_DEVICE_IXP2000	3
+/*
+ * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * for ioremap
+ */
+
+/*
+ * __arm_ioremap takes CPU physical address.
+ * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ */
+extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iounmap(volatile void __iomem *addr);
+
+/*
+ * Bad read/write accesses...
+ */
+extern void __readwrite_bug(const char *fn);
+
+/*
+ * Now, pick up the machine-defined IO definitions
+ */
+#include <asm/arch/io.h>
+
+/*
+ *  IO port access primitives
+ *  -------------------------
+ *
+ * The ARM doesn't have special IO access instructions; all IO is memory
+ * mapped.  Note that these are defined to perform little endian accesses
+ * only.  Their primary purpose is to access PCI and ISA peripherals.
+ *
+ * Note that for a big endian machine, this implies that the following
+ * big endian mode connectivity is in place, as described by numerous
+ * ARM documents:
+ *
+ *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
+ *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
+ *
+ * The machine specific io.h include defines __io to translate an "IO"
+ * address to a memory address.
+ *
+ * Note that we prevent GCC re-ordering or caching values in expressions
+ * by introducing sequence points into the in*() definitions.  Note that
+ * __raw_* do not guarantee this behaviour.
+ *
+ * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
+ */
+#ifdef __io
+#define outb(v,p)		__raw_writeb(v,__io(p))
+#define outw(v,p)		__raw_writew((__force __u16) \
+					cpu_to_le16(v),__io(p))
+#define outl(v,p)		__raw_writel((__force __u32) \
+					cpu_to_le32(v),__io(p))
+
+#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
+			__raw_readw(__io(p))); __v; })
+#define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
+			__raw_readl(__io(p))); __v; })
+
+#define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
+#define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
+#define outsl(p,d,l)		__raw_writesl(__io(p),d,l)
+
+#define insb(p,d,l)		__raw_readsb(__io(p),d,l)
+#define insw(p,d,l)		__raw_readsw(__io(p),d,l)
+#define insl(p,d,l)		__raw_readsl(__io(p),d,l)
+#endif
+
+#define outb_p(val,port)	outb((val),(port))
+#define outw_p(val,port)	outw((val),(port))
+#define outl_p(val,port)	outl((val),(port))
+#define inb_p(port)		inb((port))
+#define inw_p(port)		inw((port))
+#define inl_p(port)		inl((port))
+
+#define outsb_p(port,from,len)	outsb(port,from,len)
+#define outsw_p(port,from,len)	outsw(port,from,len)
+#define outsl_p(port,from,len)	outsl(port,from,len)
+#define insb_p(port,to,len)	insb(port,to,len)
+#define insw_p(port,to,len)	insw(port,to,len)
+#define insl_p(port,to,len)	insl(port,to,len)
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
+extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
+extern void _memset_io(volatile void __iomem *, int, size_t);
+
+#define mmiowb()
+
+/*
+ *  Memory access primitives
+ *  ------------------------
+ *
+ * These perform PCI memory accesses via an ioremap region.  They don't
+ * take an address as such, but a cookie.
+ *
+ * Again, these are defined to perform little endian accesses.  See the
+ * IO port primitives for more information.
+ */
+#ifdef __mem_pci
+#define readb(c) ({ __u8  __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
+					__raw_readw(__mem_pci(c))); __v; })
+#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
+					__raw_readl(__mem_pci(c))); __v; })
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+
+#define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
+#define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)
+#define readsl(p,d,l)		__raw_readsl(__mem_pci(p),d,l)
+
+#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
+#define writew(v,c)		__raw_writew((__force __u16) \
+					cpu_to_le16(v),__mem_pci(c))
+#define writel(v,c)		__raw_writel((__force __u32) \
+					cpu_to_le32(v),__mem_pci(c))
+
+#define writesb(p,d,l)		__raw_writesb(__mem_pci(p),d,l)
+#define writesw(p,d,l)		__raw_writesw(__mem_pci(p),d,l)
+#define writesl(p,d,l)		__raw_writesl(__mem_pci(p),d,l)
+
+#define memset_io(c,v,l)	_memset_io(__mem_pci(c),(v),(l))
+#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),__mem_pci(c),(l))
+#define memcpy_toio(c,a,l)	_memcpy_toio(__mem_pci(c),(a),(l))
+
+#elif !defined(readb)
+
+#define readb(c)			(__readwrite_bug("readb"),0)
+#define readw(c)			(__readwrite_bug("readw"),0)
+#define readl(c)			(__readwrite_bug("readl"),0)
+#define writeb(v,c)			__readwrite_bug("writeb")
+#define writew(v,c)			__readwrite_bug("writew")
+#define writel(v,c)			__readwrite_bug("writel")
+
+#define check_signature(io,sig,len)	(0)
+
+#endif	/* __mem_pci */
+
+/*
+ * ioremap and friends.
+ *
+ * ioremap takes a PCI memory address, as specified in
+ * Documentation/IO-mapping.txt.
+ *
+ */
+#ifndef __arch_ioremap
+#define ioremap(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE)
+#define ioremap_nocache(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE)
+#define ioremap_cached(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE_CACHED)
+#define iounmap(cookie)			__iounmap(cookie)
+#else
+#define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define iounmap(cookie)			__arch_iounmap(cookie)
+#endif
+
+/*
+ * io{read,write}{8,16,32} macros
+ */
+#ifndef ioread8
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+
+#define iowrite8(v,p)	__raw_writeb(v, p)
+#define iowrite16(v,p)	__raw_writew((__force __u16)cpu_to_le16(v), p)
+#define iowrite32(v,p)	__raw_writel((__force __u32)cpu_to_le32(v), p)
+
+#define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
+#define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
+#define ioread32_rep(p,d,c)	__raw_readsl(p,d,c)
+
+#define iowrite8_rep(p,s,c)	__raw_writesb(p,s,c)
+#define iowrite16_rep(p,s,c)	__raw_writesw(p,s,c)
+#define iowrite32_rep(p,s,c)	__raw_writesl(p,s,c)
+
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *addr);
+#endif
+
+struct pci_dev;
+
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+
+/*
+ * can the hardware map this into one segment or not, given no other
+ * constraints.
+ */
+#define BIOVEC_MERGEABLE(vec1, vec2)	\
+	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+
+#ifdef CONFIG_MMU
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+#endif
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+/*
+ * Register ISA memory and port locations for glibc iopl/inb/outb
+ * emulation.
+ */
+extern void register_isa_ports(unsigned int mmio, unsigned int io,
+			       unsigned int io_shift);
+
+#endif	/* __KERNEL__ */
+#endif	/* __ASM_ARM_IO_H */
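As a hedged usage sketch of the remap and accessor macros defined above; the physical address 0x80000000 and register offset 0x10 are made-up values.

/* Illustrative MMIO access using ioremap()/readl()/writel(). */
static int example_read_id(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(0x80000000, 0x1000);	/* map 4K as MT_DEVICE */
	if (!regs)
		return -ENOMEM;

	id = readl(regs + 0x10);		/* little-endian 32-bit read */
	writel(0, regs + 0x10);			/* little-endian 32-bit write */

	iounmap(regs);
	return id;
}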
diff --git a/arch/arm/include/asm/ioctl.h b/arch/arm/include/asm/ioctl.h
new file mode 100644
index 0000000..b279fe0
--- /dev/null
+++ b/arch/arm/include/asm/ioctl.h
@@ -0,0 +1 @@
+#include <asm-generic/ioctl.h>
diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h
new file mode 100644
index 0000000..a91d8a1
--- /dev/null
+++ b/arch/arm/include/asm/ioctls.h
@@ -0,0 +1,84 @@
+#ifndef __ASM_ARM_IOCTLS_H
+#define __ASM_ARM_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS		0x5401
+#define TCSETS		0x5402
+#define TCSETSW		0x5403
+#define TCSETSF		0x5404
+#define TCGETA		0x5405
+#define TCSETA		0x5406
+#define TCSETAW		0x5407
+#define TCSETAF		0x5408
+#define TCSBRK		0x5409
+#define TCXONC		0x540A
+#define TCFLSH		0x540B
+#define TIOCEXCL	0x540C
+#define TIOCNXCL	0x540D
+#define TIOCSCTTY	0x540E
+#define TIOCGPGRP	0x540F
+#define TIOCSPGRP	0x5410
+#define TIOCOUTQ	0x5411
+#define TIOCSTI		0x5412
+#define TIOCGWINSZ	0x5413
+#define TIOCSWINSZ	0x5414
+#define TIOCMGET	0x5415
+#define TIOCMBIS	0x5416
+#define TIOCMBIC	0x5417
+#define TIOCMSET	0x5418
+#define TIOCGSOFTCAR	0x5419
+#define TIOCSSOFTCAR	0x541A
+#define FIONREAD	0x541B
+#define TIOCINQ		FIONREAD
+#define TIOCLINUX	0x541C
+#define TIOCCONS	0x541D
+#define TIOCGSERIAL	0x541E
+#define TIOCSSERIAL	0x541F
+#define TIOCPKT		0x5420
+#define FIONBIO		0x5421
+#define TIOCNOTTY	0x5422
+#define TIOCSETD	0x5423
+#define TIOCGETD	0x5424
+#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK	0x5427  /* BSD compatibility */
+#define TIOCCBRK	0x5428  /* BSD compatibility */
+#define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TCGETS2		_IOR('T',0x2A, struct termios2)
+#define TCSETS2		_IOW('T',0x2B, struct termios2)
+#define TCSETSW2	_IOW('T',0x2C, struct termios2)
+#define TCSETSF2	_IOW('T',0x2D, struct termios2)
+#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+
+#define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
+#define FIOCLEX		0x5451
+#define FIOASYNC	0x5452
+#define TIOCSERCONFIG	0x5453
+#define TIOCSERGWILD	0x5454
+#define TIOCSERSWILD	0x5455
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
+#define TIOCSERGETLSR   0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+#define FIOQSIZE	0x545E
+
+/* Used for packet mode */
+#define TIOCPKT_DATA		 0
+#define TIOCPKT_FLUSHREAD	 1
+#define TIOCPKT_FLUSHWRITE	 2
+#define TIOCPKT_STOP		 4
+#define TIOCPKT_START		 8
+#define TIOCPKT_NOSTOP		16
+#define TIOCPKT_DOSTOP		32
+
+#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
+
+#endif
diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h
new file mode 100644
index 0000000..9768397
--- /dev/null
+++ b/arch/arm/include/asm/ipcbuf.h
@@ -0,0 +1,29 @@
+#ifndef __ASMARM_IPCBUF_H
+#define __ASMARM_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for arm architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t		key;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
+	__kernel_mode_t		mode;
+	unsigned short		__pad1;
+	unsigned short		seq;
+	unsigned short		__pad2;
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+#endif /* __ASMARM_IPCBUF_H */
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
new file mode 100644
index 0000000..9cb0190
--- /dev/null
+++ b/arch/arm/include/asm/irq.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_ARM_IRQ_H
+#define __ASM_ARM_IRQ_H
+
+#include <asm/arch/irqs.h>
+
+#ifndef irq_canonicalize
+#define irq_canonicalize(i)	(i)
+#endif
+
+#ifndef NR_IRQS
+#define NR_IRQS	128
+#endif
+
+/*
+ * Use this value to indicate lack of interrupt
+ * capability
+ */
+#ifndef NO_IRQ
+#define NO_IRQ	((unsigned int)(-1))
+#endif
+
+#ifndef __ASSEMBLY__
+struct irqaction;
+extern void migrate_irqs(void);
+#endif
+
+#endif
+
diff --git a/arch/arm/include/asm/irq_regs.h b/arch/arm/include/asm/irq_regs.h
new file mode 100644
index 0000000..3dd9c0b
--- /dev/null
+++ b/arch/arm/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
new file mode 100644
index 0000000..6d09974
--- /dev/null
+++ b/arch/arm/include/asm/irqflags.h
@@ -0,0 +1,132 @@
+#ifndef __ASM_ARM_IRQFLAGS_H
+#define __ASM_ARM_IRQFLAGS_H
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+/*
+ * CPU interrupt mask handling.
+ */
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define raw_local_irq_save(x)					\
+	({							\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ local_irq_save\n"	\
+	"cpsid	i"						\
+	: "=r" (x) : : "memory", "cc");				\
+	})
+
+#define raw_local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
+#define raw_local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
+#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")
+
+#else
+
+/*
+ * Save the current interrupt enable state & disable IRQs
+ */
+#define raw_local_irq_save(x)					\
+	({							\
+		unsigned long temp;				\
+		(void) (&temp == &x);				\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ local_irq_save\n"	\
+"	orr	%1, %0, #128\n"					\
+"	msr	cpsr_c, %1"					\
+	: "=r" (x), "=r" (temp)					\
+	:							\
+	: "memory", "cc");					\
+	})
+
+/*
+ * Enable IRQs
+ */
+#define raw_local_irq_enable()					\
+	({							\
+		unsigned long temp;				\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ local_irq_enable\n"	\
+"	bic	%0, %0, #128\n"					\
+"	msr	cpsr_c, %0"					\
+	: "=r" (temp)						\
+	:							\
+	: "memory", "cc");					\
+	})
+
+/*
+ * Disable IRQs
+ */
+#define raw_local_irq_disable()					\
+	({							\
+		unsigned long temp;				\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ local_irq_disable\n"	\
+"	orr	%0, %0, #128\n"					\
+"	msr	cpsr_c, %0"					\
+	: "=r" (temp)						\
+	:							\
+	: "memory", "cc");					\
+	})
+
+/*
+ * Enable FIQs
+ */
+#define local_fiq_enable()					\
+	({							\
+		unsigned long temp;				\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ stf\n"		\
+"	bic	%0, %0, #64\n"					\
+"	msr	cpsr_c, %0"					\
+	: "=r" (temp)						\
+	:							\
+	: "memory", "cc");					\
+	})
+
+/*
+ * Disable FIQs
+ */
+#define local_fiq_disable()					\
+	({							\
+		unsigned long temp;				\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ clf\n"		\
+"	orr	%0, %0, #64\n"					\
+"	msr	cpsr_c, %0"					\
+	: "=r" (temp)						\
+	:							\
+	: "memory", "cc");					\
+	})
+
+#endif
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define raw_local_save_flags(x)					\
+	({							\
+	__asm__ __volatile__(					\
+	"mrs	%0, cpsr		@ local_save_flags"	\
+	: "=r" (x) : : "memory", "cc");				\
+	})
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define raw_local_irq_restore(x)				\
+	__asm__ __volatile__(					\
+	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
+	:							\
+	: "r" (x)						\
+	: "memory", "cc")
+
+#define raw_irqs_disabled_flags(flags)	\
+({					\
+	(int)((flags) & PSR_I_BIT);	\
+})
+
+#endif
+#endif
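A small hedged example of the save/restore pairing these macros implement; in real code the generic local_irq_save()/local_irq_restore() wrappers from linux/irqflags.h are normally used rather than the raw primitives.

/* Illustrative critical section using the raw primitives directly. */
static void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* record CPSR, then mask IRQs */
	/* ... code that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* put the I bit back as it was */
}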
diff --git a/arch/arm/include/asm/kdebug.h b/arch/arm/include/asm/kdebug.h
new file mode 100644
index 0000000..6ece1b0
--- /dev/null
+++ b/arch/arm/include/asm/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
new file mode 100644
index 0000000..c8986bb
--- /dev/null
+++ b/arch/arm/include/asm/kexec.h
@@ -0,0 +1,31 @@
+#ifndef _ARM_KEXEC_H
+#define _ARM_KEXEC_H
+
+#ifdef CONFIG_KEXEC
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_CODE_SIZE	4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM
+
+#define KEXEC_ARM_ATAGS_OFFSET  0x1000
+#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000
+
+#ifndef __ASSEMBLY__
+
+struct kimage;
+/* Provide a dummy definition to avoid build failures. */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+                                        struct pt_regs *oldregs) { }
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* _ARM_KEXEC_H */
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
new file mode 100644
index 0000000..67af4b8
--- /dev/null
+++ b/arch/arm/include/asm/kgdb.h
@@ -0,0 +1,104 @@
+/*
+ * ARM KGDB support
+ *
+ * Author: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 MontaVista Software Inc.
+ *
+ */
+
+#ifndef __ARM_KGDB_H__
+#define __ARM_KGDB_H__
+
+#include <linux/ptrace.h>
+
+/*
+ * GDB assumes that we're a user process being debugged, so
+ * it will send us an SWI command to write into memory as the
+ * debug trap. When an SWI occurs, the next instruction addr is
+ * placed into R14_svc before jumping to the vector trap.
+ * This doesn't work for kernel debugging as we are already in SVC
+ * mode; we would lose the kernel's LR, which is a bad thing.
+ *
+ * By doing this as an undefined instruction trap, we force a mode
+ * switch from SVC to UND mode, allowing us to save full kernel state.
+ *
+ * We also define a KGDB_COMPILED_BREAK which can be used to compile
+ * in breakpoints. This is important for things like sysrq-G and for
+ * the initial breakpoint from trap_init().
+ *
+ * Note to ARM HW designers: Add real trap support like SH && PPC to
+ * make our lives much much simpler. :)
+ */
+#define BREAK_INSTR_SIZE	4
+#define GDB_BREAKINST		0xef9f0001
+#define KGDB_BREAKINST		0xe7ffdefe
+#define KGDB_COMPILED_BREAK	0xe7ffdeff
+#define CACHE_FLUSH_IS_SAFE	1
+
+#ifndef	__ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm(".word 0xe7ffdeff");
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * From Kevin Hilman:
+ *
+ * gdb is expecting the following registers layout.
+ *
+ * r0-r15: 1 long word each
+ * f0-f7:  unused, 3 long words each !!
+ * fps:    unused, 1 long word
+ * cpsr:   1 long word
+ *
+ * Even though f0-f7 and fps are not used, they need to be
+ * present in the registers sent for correct processing in
+ * the host-side gdb.
+ *
+ * In particular, it is crucial that CPSR is in the right place,
+ * otherwise gdb will not be able to correctly interpret stepping over
+ * conditional branches.
+ */
+#define _GP_REGS		16
+#define _FP_REGS		8
+#define _EXTRA_REGS		2
+#define GDB_MAX_REGS		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+
+#define KGDB_MAX_NO_CPUS	1
+#define BUFMAX			400
+#define NUMREGBYTES		(GDB_MAX_REGS << 2)
+#define NUMCRITREGBYTES		(32 << 2)
+
+#define _R0			0
+#define _R1			1
+#define _R2			2
+#define _R3			3
+#define _R4			4
+#define _R5			5
+#define _R6			6
+#define _R7			7
+#define _R8			8
+#define _R9			9
+#define _R10			10
+#define _FP			11
+#define _IP			12
+#define _SPT			13
+#define _LR			14
+#define _PC			15
+#define _CPSR			(GDB_MAX_REGS - 1)
+
+/*
+ * So that we can denote the end of a frame for tracing,
+ * in the simple case:
+ */
+#define CFI_END_FRAME(func)	__CFI_END_FRAME(_PC, _SPT, func)
+
+#endif /* __ASM_KGDB_H__ */
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
new file mode 100644
index 0000000..45def13
--- /dev/null
+++ b/arch/arm/include/asm/kmap_types.h
@@ -0,0 +1,24 @@
+#ifndef __ARM_KMAP_TYPES_H
+#define __ARM_KMAP_TYPES_H
+
+/*
+ * This is the "bare minimum".  AIO seems to require this.
+ */
+enum km_type {
+	KM_BOUNCE_READ,
+	KM_SKB_SUNRPC_DATA,
+	KM_SKB_DATA_SOFTIRQ,
+	KM_USER0,
+	KM_USER1,
+	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
+	KM_IRQ0,
+	KM_IRQ1,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
+	KM_TYPE_NR
+};
+
+#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
new file mode 100644
index 0000000..a5d0d99
--- /dev/null
+++ b/arch/arm/include/asm/kprobes.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/include/asm/kprobes.h
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KPROBES_H
+#define _ARM_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE			2
+#define MAX_STACK_SIZE			64	/* 32 would probably be OK */
+
+/*
+ * This undefined instruction must be unique and
+ * reserved solely for kprobes' use.
+ */
+#define KPROBE_BREAKPOINT_INSTRUCTION	0xe7f001f8
+
+#define regs_return_value(regs)		((regs)->ARM_r0)
+#define flush_insn_slot(p)		do { } while (0)
+#define kretprobe_blacklist_size	0
+
+typedef u32 kprobe_opcode_t;
+
+struct kprobe;
+typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
+
+/* Architecture specific copy of original instruction. */
+struct arch_specific_insn {
+	kprobe_opcode_t		*insn;
+	kprobe_insn_handler_t	*insn_handler;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned int kprobe_status;
+	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
+	char jprobes_stack[MAX_STACK_SIZE];
+};
+
+void arch_remove_kprobe(struct kprobe *);
+void kretprobe_trampoline(void);
+
+int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+			     unsigned long val, void *data);
+
+enum kprobe_insn {
+	INSN_REJECTED,
+	INSN_GOOD,
+	INSN_GOOD_NO_SLOT
+};
+
+enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
+					struct arch_specific_insn *);
+void __init arm_kprobe_decode_init(void);
+
+#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/leds.h b/arch/arm/include/asm/leds.h
new file mode 100644
index 0000000..c545739
--- /dev/null
+++ b/arch/arm/include/asm/leds.h
@@ -0,0 +1,50 @@
+/*
+ *  arch/arm/include/asm/leds.h
+ *
+ *  Copyright (C) 1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Event-driven interface for LEDs on machines
+ *  Added led_start and led_stop - Alex Holden, 28th Dec 1998.
+ */
+#ifndef ASM_ARM_LEDS_H
+#define ASM_ARM_LEDS_H
+
+
+typedef enum {
+	led_idle_start,
+	led_idle_end,
+	led_timer,
+	led_start,
+	led_stop,
+	led_claim,		/* override idle & timer leds */
+	led_release,		/* restore idle & timer leds */
+	led_start_timer_mode,
+	led_stop_timer_mode,
+	led_green_on,
+	led_green_off,
+	led_amber_on,
+	led_amber_off,
+	led_red_on,
+	led_red_off,
+	led_blue_on,
+	led_blue_off,
+	/*
+	 * I want this between led_timer and led_start, but
+	 * someone has decided to export this to user space
+	 */
+	led_halted
+} led_event_t;
+
+/* Use this routine to handle LEDs */
+
+#ifdef CONFIG_LEDS
+extern void (*leds_event)(led_event_t);
+#else
+#define leds_event(e)
+#endif
+
+#endif
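For illustration, machine or driver code simply posts events through the hook; when CONFIG_LEDS is disabled the calls compile away.

/* Illustrative use of the event hook declared above. */
static void example_signal_error(void)
{
	leds_event(led_claim);		/* take the LEDs away from idle/timer use */
	leds_event(led_red_on);		/* indicate an error condition */
}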
diff --git a/arch/arm/include/asm/limits.h b/arch/arm/include/asm/limits.h
new file mode 100644
index 0000000..08d8c66
--- /dev/null
+++ b/arch/arm/include/asm/limits.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_PIPE_H
+#define __ASM_PIPE_H
+
+#ifndef PAGE_SIZE
+#include <asm/page.h>
+#endif
+
+#define PIPE_BUF	PAGE_SIZE
+
+#endif
+
diff --git a/arch/arm/include/asm/linkage.h b/arch/arm/include/asm/linkage.h
new file mode 100644
index 0000000..5a25632
--- /dev/null
+++ b/arch/arm/include/asm/linkage.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#define ENDPROC(name) \
+  .type name, %function; \
+  END(name)
+
+#endif
diff --git a/arch/arm/include/asm/local.h b/arch/arm/include/asm/local.h
new file mode 100644
index 0000000..c11c530
--- /dev/null
+++ b/arch/arm/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
new file mode 100644
index 0000000..ef4c897
--- /dev/null
+++ b/arch/arm/include/asm/locks.h
@@ -0,0 +1,274 @@
+/*
+ *  arch/arm/include/asm/locks.h
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Interrupt safe locking assembler. 
+ */
+#ifndef __ASM_PROC_LOCKS_H
+#define __ASM_PROC_LOCKS_H
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __down_op(ptr,fail)			\
+	({					\
+	__asm__ __volatile__(			\
+	"@ down_op\n"				\
+"1:	ldrex	lr, [%0]\n"			\
+"	sub	lr, lr, %1\n"			\
+"	strex	ip, lr, [%0]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	teq	lr, #0\n"			\
+"	movmi	ip, %0\n"			\
+"	blmi	" #fail				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	})
+
+#define __down_op_ret(ptr,fail)			\
+	({					\
+		unsigned int ret;		\
+	__asm__ __volatile__(			\
+	"@ down_op_ret\n"			\
+"1:	ldrex	lr, [%1]\n"			\
+"	sub	lr, lr, %2\n"			\
+"	strex	ip, lr, [%1]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	teq	lr, #0\n"			\
+"	movmi	ip, %1\n"			\
+"	movpl	ip, #0\n"			\
+"	blmi	" #fail "\n"			\
+"	mov	%0, ip"				\
+	: "=&r" (ret)				\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	ret;					\
+	})
+
+#define __up_op(ptr,wake)			\
+	({					\
+	smp_mb();				\
+	__asm__ __volatile__(			\
+	"@ up_op\n"				\
+"1:	ldrex	lr, [%0]\n"			\
+"	add	lr, lr, %1\n"			\
+"	strex	ip, lr, [%0]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	cmp	lr, #0\n"			\
+"	movle	ip, %0\n"			\
+"	blle	" #wake				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	})
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes.  BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS      0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __down_op_write(ptr,fail)		\
+	({					\
+	__asm__ __volatile__(			\
+	"@ down_op_write\n"			\
+"1:	ldrex	lr, [%0]\n"			\
+"	sub	lr, lr, %1\n"			\
+"	strex	ip, lr, [%0]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	teq	lr, #0\n"			\
+"	movne	ip, %0\n"			\
+"	blne	" #fail				\
+	:					\
+	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	})
+
+#define __up_op_write(ptr,wake)			\
+	({					\
+	smp_mb();				\
+	__asm__ __volatile__(			\
+	"@ up_op_write\n"			\
+"1:	ldrex	lr, [%0]\n"			\
+"	adds	lr, lr, %1\n"			\
+"	strex	ip, lr, [%0]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	movcs	ip, %0\n"			\
+"	blcs	" #wake				\
+	:					\
+	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
+	: "ip", "lr", "cc");			\
+	})
+
+#define __down_op_read(ptr,fail)		\
+	__down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake)			\
+	({					\
+	smp_mb();				\
+	__asm__ __volatile__(			\
+	"@ up_op_read\n"			\
+"1:	ldrex	lr, [%0]\n"			\
+"	add	lr, lr, %1\n"			\
+"	strex	ip, lr, [%0]\n"			\
+"	teq	ip, #0\n"			\
+"	bne	1b\n"				\
+"	teq	lr, #0\n"			\
+"	moveq	ip, %0\n"			\
+"	bleq	" #wake				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	})
+
+#else
+
+#define __down_op(ptr,fail)			\
+	({					\
+	__asm__ __volatile__(			\
+	"@ down_op\n"				\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%0]\n"			\
+"	subs	lr, lr, %1\n"			\
+"	str	lr, [%0]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	movmi	ip, %0\n"			\
+"	blmi	" #fail				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	})
+
+#define __down_op_ret(ptr,fail)			\
+	({					\
+		unsigned int ret;		\
+	__asm__ __volatile__(			\
+	"@ down_op_ret\n"			\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%1]\n"			\
+"	subs	lr, lr, %2\n"			\
+"	str	lr, [%1]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	movmi	ip, %1\n"			\
+"	movpl	ip, #0\n"			\
+"	blmi	" #fail "\n"			\
+"	mov	%0, ip"				\
+	: "=&r" (ret)				\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	ret;					\
+	})
+
+#define __up_op(ptr,wake)			\
+	({					\
+	smp_mb();				\
+	__asm__ __volatile__(			\
+	"@ up_op\n"				\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%0]\n"			\
+"	adds	lr, lr, %1\n"			\
+"	str	lr, [%0]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	movle	ip, %0\n"			\
+"	blle	" #wake				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	})
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes.  BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS      0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __down_op_write(ptr,fail)		\
+	({					\
+	__asm__ __volatile__(			\
+	"@ down_op_write\n"			\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%0]\n"			\
+"	subs	lr, lr, %1\n"			\
+"	str	lr, [%0]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	movne	ip, %0\n"			\
+"	blne	" #fail				\
+	:					\
+	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	})
+
+#define __up_op_write(ptr,wake)			\
+	({					\
+	__asm__ __volatile__(			\
+	"@ up_op_write\n"			\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%0]\n"			\
+"	adds	lr, lr, %1\n"			\
+"	str	lr, [%0]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	movcs	ip, %0\n"			\
+"	blcs	" #wake				\
+	:					\
+	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
+	})
+
+#define __down_op_read(ptr,fail)		\
+	__down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake)			\
+	({					\
+	smp_mb();				\
+	__asm__ __volatile__(			\
+	"@ up_op_read\n"			\
+"	mrs	ip, cpsr\n"			\
+"	orr	lr, ip, #128\n"			\
+"	msr	cpsr_c, lr\n"			\
+"	ldr	lr, [%0]\n"			\
+"	adds	lr, lr, %1\n"			\
+"	str	lr, [%0]\n"			\
+"	msr	cpsr_c, ip\n"			\
+"	moveq	ip, %0\n"			\
+"	bleq	" #wake				\
+	:					\
+	: "r" (ptr), "I" (1)			\
+	: "ip", "lr", "cc");			\
+	})
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
new file mode 100644
index 0000000..c59842d
--- /dev/null
+++ b/arch/arm/include/asm/mach/arch.h
@@ -0,0 +1,60 @@
+/*
+ *  arch/arm/include/asm/mach/arch.h
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASSEMBLY__
+
+struct tag;
+struct meminfo;
+struct sys_timer;
+
+struct machine_desc {
+	/*
+	 * Note! The first four elements are used
+	 * by assembler code in head.S, head-common.S
+	 */
+	unsigned int		nr;		/* architecture number	*/
+	unsigned int		phys_io;	/* start of physical io	*/
+	unsigned int		io_pg_offst;	/* byte offset for io
+						 * page table entry	*/
+
+	const char		*name;		/* architecture name	*/
+	unsigned long		boot_params;	/* tagged list		*/
+
+	unsigned int		video_start;	/* start of video RAM	*/
+	unsigned int		video_end;	/* end of video RAM	*/
+
+	unsigned int		reserve_lp0 :1;	/* never has lp0	*/
+	unsigned int		reserve_lp1 :1;	/* never has lp1	*/
+	unsigned int		reserve_lp2 :1;	/* never has lp2	*/
+	unsigned int		soft_reboot :1;	/* soft reboot		*/
+	void			(*fixup)(struct machine_desc *,
+					 struct tag *, char **,
+					 struct meminfo *);
+	void			(*map_io)(void);/* IO mapping function	*/
+	void			(*init_irq)(void);
+	struct sys_timer	*timer;		/* system tick timer	*/
+	void			(*init_machine)(void);
+};
+
+/*
+ * Set of macros to define architecture features.  This is built into
+ * a table by the linker.
+ */
+#define MACHINE_START(_type,_name)			\
+static const struct machine_desc __mach_desc_##_type	\
+ __used							\
+ __attribute__((__section__(".arch.info.init"))) = {	\
+	.nr		= MACH_TYPE_##_type,		\
+	.name		= _name,
+
+#define MACHINE_END				\
+};
+
+#endif
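A hedged sketch of a board file using these macros; the machine type EXAMPLE, the addresses and the callbacks are placeholders (MACH_TYPE_EXAMPLE would have to exist in mach-types, and asm/mach/arch.h plus asm/mach-types.h must be included).

/* Illustrative machine record. */
MACHINE_START(EXAMPLE, "Example Board")
	.phys_io	= 0x40000000,
	.io_pg_offst	= ((0xf4000000) >> 18) & 0xfffc,
	.boot_params	= 0x00000100,
	.map_io		= example_map_io,
	.init_irq	= example_init_irq,
	.timer		= &example_timer,
	.init_machine	= example_init_machine,
MACHINE_END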
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
new file mode 100644
index 0000000..fc7278e
--- /dev/null
+++ b/arch/arm/include/asm/mach/dma.h
@@ -0,0 +1,57 @@
+/*
+ *  arch/arm/include/asm/mach/dma.h
+ *
+ *  Copyright (C) 1998-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This header file describes the interface between the generic DMA handler
+ *  (dma.c) and the architecture-specific DMA backends (dma-*.c)
+ */
+
+struct dma_struct;
+typedef struct dma_struct dma_t;
+
+struct dma_ops {
+	int	(*request)(dmach_t, dma_t *);		/* optional */
+	void	(*free)(dmach_t, dma_t *);		/* optional */
+	void	(*enable)(dmach_t, dma_t *);		/* mandatory */
+	void 	(*disable)(dmach_t, dma_t *);		/* mandatory */
+	int	(*residue)(dmach_t, dma_t *);		/* optional */
+	int	(*setspeed)(dmach_t, dma_t *, int);	/* optional */
+	char	*type;
+};
+
+struct dma_struct {
+	void		*addr;		/* single DMA address		*/
+	unsigned long	count;		/* single DMA size		*/
+	struct scatterlist buf;		/* single DMA			*/
+	int		sgcount;	/* number of DMA SG		*/
+	struct scatterlist *sg;		/* DMA Scatter-Gather List	*/
+
+	unsigned int	active:1;	/* Transfer active		*/
+	unsigned int	invalid:1;	/* Address/Count changed	*/
+
+	dmamode_t	dma_mode;	/* DMA mode			*/
+	int		speed;		/* DMA speed			*/
+
+	unsigned int	lock;		/* Device is allocated		*/
+	const char	*device_id;	/* Device name			*/
+
+	unsigned int	dma_base;	/* Controller base address	*/
+	int		dma_irq;	/* Controller IRQ		*/
+	struct scatterlist cur_sg;	/* Current controller buffer	*/
+	unsigned int	state;
+
+	struct dma_ops	*d_ops;
+};
+
+/* Prototype: void arch_dma_init(dma)
+ * Purpose  : Initialise architecture specific DMA
+ * Params   : dma - pointer to array of DMA structures
+ */
+extern void arch_dma_init(dma_t *dma);
+
+extern void isa_init_dma(dma_t *dma);
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
new file mode 100644
index 0000000..4ca69fe
--- /dev/null
+++ b/arch/arm/include/asm/mach/flash.h
@@ -0,0 +1,39 @@
+/*
+ *  arch/arm/include/asm/mach/flash.h
+ *
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASMARM_MACH_FLASH_H
+#define ASMARM_MACH_FLASH_H
+
+struct mtd_partition;
+struct mtd_info;
+
+/*
+ * map_name:	the map probe function name
+ * name:	flash device name (eg, as used with mtdparts=)
+ * width:	width of mapped device
+ * init:	method called at driver/device initialisation
+ * exit:	method called at driver/device removal
+ * set_vpp:	method called to enable or disable VPP
+ * mmcontrol:	method called to enable or disable Sync. Burst Read in OneNAND
+ * parts:	optional array of mtd_partitions for static partitioning
+ * nr_parts:	number of mtd_partitions for static partitioning
+ */
+struct flash_platform_data {
+	const char	*map_name;
+	const char	*name;
+	unsigned int	width;
+	int		(*init)(void);
+	void		(*exit)(void);
+	void		(*set_vpp)(int on);
+	void		(*mmcontrol)(struct mtd_info *mtd, int sync_read);
+	struct mtd_partition *parts;
+	unsigned int	nr_parts;
+};
+
+#endif
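As a usage sketch (board and partition names hypothetical), a machine passes this structure as platform data to its flash device; the map driver then probes with map_name and registers the static partitions:

	static struct mtd_partition myboard_flash_parts[] = {
		{ .name = "bootloader", .offset = 0,       .size = SZ_256K },
		{ .name = "rootfs",     .offset = SZ_256K, .size = MTDPART_SIZ_FULL },
	};

	static struct flash_platform_data myboard_flash_data = {
		.map_name	= "cfi_probe",
		.width		= 2,		/* 16-bit wide device */
		.parts		= myboard_flash_parts,
		.nr_parts	= ARRAY_SIZE(myboard_flash_parts),
	};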
diff --git a/arch/arm/include/asm/mach/irda.h b/arch/arm/include/asm/mach/irda.h
new file mode 100644
index 0000000..38f77b5
--- /dev/null
+++ b/arch/arm/include/asm/mach/irda.h
@@ -0,0 +1,20 @@
+/*
+ *  arch/arm/include/asm/mach/irda.h
+ *
+ *  Copyright (C) 2004 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_IRDA_H
+#define __ASM_ARM_MACH_IRDA_H
+
+struct irda_platform_data {
+	int (*startup)(struct device *);
+	void (*shutdown)(struct device *);
+	int (*set_power)(struct device *, unsigned int state);
+	void (*set_speed)(struct device *, unsigned int speed);
+};
+
+#endif
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
new file mode 100644
index 0000000..c57b52c
--- /dev/null
+++ b/arch/arm/include/asm/mach/irq.h
@@ -0,0 +1,54 @@
+/*
+ *  arch/arm/include/asm/mach/irq.h
+ *
+ *  Copyright (C) 1995-2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_IRQ_H
+#define __ASM_ARM_MACH_IRQ_H
+
+#include <linux/irq.h>
+
+struct seq_file;
+
+/*
+ * This is internal.  Do not use it.
+ */
+extern void (*init_arch_irq)(void);
+extern void init_FIQ(void);
+extern int show_fiq_list(struct seq_file *, void *);
+
+/*
+ * Obsolete inline function for calling irq descriptor handlers.
+ */
+static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+	desc->handle_irq(irq, desc);
+}
+
+void set_irq_flags(unsigned int irq, unsigned int flags);
+
+#define IRQF_VALID	(1 << 0)
+#define IRQF_PROBE	(1 << 1)
+#define IRQF_NOAUTOEN	(1 << 2)
+
+/*
+ * This is for easy migration, but should be changed in the source
+ */
+#define do_bad_IRQ(irq,desc)				\
+do {							\
+	spin_lock(&desc->lock);				\
+	handle_bad_irq(irq, desc);			\
+	spin_unlock(&desc->lock);			\
+} while(0)
+
+extern unsigned long irq_err_count;
+static inline void ack_bad_irq(int irq)
+{
+	irq_err_count++;
+}
+
+#endif
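The IRQF_VALID/IRQF_PROBE flags are typically applied from a machine's init_irq callback while it wires up its interrupt controller; a minimal sketch (the chip and IRQ count are hypothetical):

	for (irq = 0; irq < NR_BOARD_IRQS; irq++) {		/* hypothetical count */
		set_irq_chip(irq, &myboard_irq_chip);		/* hypothetical irq_chip */
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}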
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
new file mode 100644
index 0000000..06f583b
--- /dev/null
+++ b/arch/arm/include/asm/mach/map.h
@@ -0,0 +1,36 @@
+/*
+ *  arch/arm/include/asm/mach/map.h
+ *
+ *  Copyright (C) 1999-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Page table mapping constructs and function prototypes
+ */
+#include <asm/io.h>
+
+struct map_desc {
+	unsigned long virtual;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+};
+
+/* types 0-3 are defined in asm/io.h */
+#define MT_CACHECLEAN		4
+#define MT_MINICLEAN		5
+#define MT_LOW_VECTORS		6
+#define MT_HIGH_VECTORS		7
+#define MT_MEMORY		8
+#define MT_ROM			9
+
+#define MT_NONSHARED_DEVICE	MT_DEVICE_NONSHARED
+#define MT_IXP2000_DEVICE	MT_DEVICE_IXP2000
+
+#ifdef CONFIG_MMU
+extern void iotable_init(struct map_desc *, int);
+#else
+#define iotable_init(map,num)	do { } while (0)
+#endif
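A platform's map_io callback hands iotable_init() an array of these descriptors; the addresses below are hypothetical and only show how pfn/length/type are filled in:

	static struct map_desc myboard_io_desc[] __initdata = {
		{
			.virtual	= 0xf4000000,			/* hypothetical virtual base */
			.pfn		= __phys_to_pfn(0x40000000),	/* hypothetical physical base */
			.length		= SZ_1M,
			.type		= MT_DEVICE,			/* one of the asm/io.h types */
		},
	};

	static void __init myboard_map_io(void)
	{
		iotable_init(myboard_io_desc, ARRAY_SIZE(myboard_io_desc));
	}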
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 0000000..4da332b
--- /dev/null
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -0,0 +1,15 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+
+struct mmc_platform_data {
+	unsigned int ocr_mask;			/* available voltages */
+	u32 (*translate_vdd)(struct device *, unsigned int);
+	unsigned int (*status)(struct device *);
+};
+
+#endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
new file mode 100644
index 0000000..32da1ae
--- /dev/null
+++ b/arch/arm/include/asm/mach/pci.h
@@ -0,0 +1,72 @@
+/*
+ *  arch/arm/include/asm/mach/pci.h
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct pci_sys_data;
+struct pci_bus;
+
+struct hw_pci {
+	struct list_head buses;
+	int		nr_controllers;
+	int		(*setup)(int nr, struct pci_sys_data *);
+	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
+	void		(*preinit)(void);
+	void		(*postinit)(void);
+	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
+	int		(*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
+};
+
+/*
+ * Per-controller structure
+ */
+struct pci_sys_data {
+	struct list_head node;
+	int		busnr;		/* primary bus number			*/
+	u64		mem_offset;	/* bus->cpu memory mapping offset	*/
+	unsigned long	io_offset;	/* bus->cpu IO mapping offset		*/
+	struct pci_bus	*bus;		/* PCI bus				*/
+	struct resource *resource[3];	/* Primary PCI bus resources		*/
+					/* Bridge swizzling			*/
+	u8		(*swizzle)(struct pci_dev *, u8 *);
+					/* IRQ mapping				*/
+	int		(*map_irq)(struct pci_dev *, u8, u8);
+	struct hw_pci	*hw;
+};
+
+/*
+ * This is the standard PCI-PCI bridge swizzling algorithm.
+ */
+u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp);
+
+/*
+ * Call this with your hw_pci struct to initialise the PCI system.
+ */
+void pci_common_init(struct hw_pci *);
+
+/*
+ * PCI controllers
+ */
+extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
+extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
+extern void iop3xx_pci_preinit(void);
+extern void iop3xx_pci_preinit_cond(void);
+
+extern int dc21285_setup(int nr, struct pci_sys_data *);
+extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
+extern void dc21285_preinit(void);
+extern void dc21285_postinit(void);
+
+extern int via82c505_setup(int nr, struct pci_sys_data *);
+extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
+extern void via82c505_init(void *sysdata);
+
+extern int pci_v3_setup(int nr, struct pci_sys_data *);
+extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
+extern void pci_v3_preinit(void);
+extern void pci_v3_postinit(void);
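Tying this together, a footbridge-style platform would describe its controller with a struct hw_pci and register it once at boot. The map_irq routine is hypothetical; the dc21285 hooks are the ones declared above:

	static struct hw_pci myboard_pci __initdata = {
		.nr_controllers	= 1,
		.swizzle	= pci_std_swizzle,
		.map_irq	= myboard_map_irq,	/* hypothetical slot/pin -> IRQ lookup */
		.setup		= dc21285_setup,
		.scan		= dc21285_scan_bus,
		.preinit	= dc21285_preinit,
		.postinit	= dc21285_postinit,
	};

	static int __init myboard_pci_init(void)
	{
		pci_common_init(&myboard_pci);
		return 0;
	}
	subsys_initcall(myboard_pci_init);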
diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h
new file mode 100644
index 0000000..ea6d063
--- /dev/null
+++ b/arch/arm/include/asm/mach/serial_at91.h
@@ -0,0 +1,33 @@
+/*
+ *  arch/arm/include/asm/mach/serial_at91.h
+ *
+ *  Based on serial_sa1100.h  by Nicolas Pitre
+ *
+ *  Copyright (C) 2002 ATMEL Rousset
+ *
+ *  Low level machine dependent UART functions.
+ */
+
+struct uart_port;
+
+/*
+ * This is a temporary structure for registering these
+ * functions; it is intended to be discarded after boot.
+ */
+struct atmel_port_fns {
+	void	(*set_mctrl)(struct uart_port *, u_int);
+	u_int	(*get_mctrl)(struct uart_port *);
+	void	(*enable_ms)(struct uart_port *);
+	void	(*pm)(struct uart_port *, u_int, u_int);
+	int	(*set_wake)(struct uart_port *, u_int);
+	int	(*open)(struct uart_port *);
+	void	(*close)(struct uart_port *);
+};
+
+#if defined(CONFIG_SERIAL_ATMEL)
+void atmel_register_uart_fns(struct atmel_port_fns *fns);
+#else
+#define atmel_register_uart_fns(fns) do { } while (0)
+#endif
+
+
diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h
new file mode 100644
index 0000000..d09064b
--- /dev/null
+++ b/arch/arm/include/asm/mach/serial_sa1100.h
@@ -0,0 +1,31 @@
+/*
+ *  arch/arm/include/asm/mach/serial_sa1100.h
+ *
+ *  Author: Nicolas Pitre
+ *
+ * Moved and changed lots, Russell King
+ *
+ * Low level machine dependent UART functions.
+ */
+
+struct uart_port;
+struct uart_info;
+
+/*
+ * This is a temporary structure for registering these
+ * functions; it is intended to be discarded after boot.
+ */
+struct sa1100_port_fns {
+	void	(*set_mctrl)(struct uart_port *, u_int);
+	u_int	(*get_mctrl)(struct uart_port *);
+	void	(*pm)(struct uart_port *, u_int, u_int);
+	int	(*set_wake)(struct uart_port *, u_int);
+};
+
+#ifdef CONFIG_SERIAL_SA1100
+void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
+void sa1100_register_uart(int idx, int port);
+#else
+#define sa1100_register_uart_fns(fns) do { } while (0)
+#define sa1100_register_uart(idx,port) do { } while (0)
+#endif
diff --git a/arch/arm/include/asm/mach/sharpsl_param.h b/arch/arm/include/asm/mach/sharpsl_param.h
new file mode 100644
index 0000000..7a24ecf
--- /dev/null
+++ b/arch/arm/include/asm/mach/sharpsl_param.h
@@ -0,0 +1,37 @@
+/*
+ * Hardware parameter area specific to Sharp SL series devices
+ *
+ * Copyright (c) 2005 Richard Purdie
+ *
+ * Based on Sharp's 2.4 kernel patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+struct sharpsl_param_info {
+  unsigned int comadj_keyword;
+  unsigned int comadj;
+
+  unsigned int uuid_keyword;
+  unsigned char uuid[16];
+
+  unsigned int touch_keyword;
+  unsigned int touch_xp;
+  unsigned int touch_yp;
+  unsigned int touch_xd;
+  unsigned int touch_yd;
+
+  unsigned int adadj_keyword;
+  unsigned int adadj;
+
+  unsigned int phad_keyword;
+  unsigned int phadadj;
+} __attribute__((packed));
+
+
+extern struct sharpsl_param_info sharpsl_param;
+extern void sharpsl_save_param(void);
+
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
new file mode 100644
index 0000000..b2cc1fc
--- /dev/null
+++ b/arch/arm/include/asm/mach/time.h
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/include/asm/mach/time.h
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_TIME_H
+#define __ASM_ARM_MACH_TIME_H
+
+#include <linux/sysdev.h>
+
+/*
+ * This is our kernel timer structure.
+ *
+ * - init
+ *   Initialise the kernel's jiffy timer source, claim its interrupt
+ *   using setup_irq.  This is called early on during initialisation
+ *   while interrupts are still disabled on the local CPU.
+ * - suspend
+ *   Suspend the kernel jiffy timer source, if necessary.  This
+ *   is called with interrupts disabled, after all normal devices
+ *   have been suspended.  If no action is required, set this to
+ *   NULL.
+ * - resume
+ *   Resume the kernel jiffy timer source, if necessary.  This
+ *   is called with interrupts disabled before any normal devices
+ *   are resumed.  If no action is required, set this to NULL.
+ * - offset
+ *   Return the timer offset in microseconds since the last timer
+ *   interrupt.  Note: this must take account of any unprocessed
+ *   timer interrupt which may be pending.
+ */
+struct sys_timer {
+	struct sys_device	dev;
+	void			(*init)(void);
+	void			(*suspend)(void);
+	void			(*resume)(void);
+#ifndef CONFIG_GENERIC_TIME
+	unsigned long		(*offset)(void);
+#endif
+};
+
+extern struct sys_timer *system_timer;
+extern void timer_tick(void);
+
+/*
+ * Kernel time keeping support.
+ */
+struct timespec;
+extern int (*set_rtc)(void);
+extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
+extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
+
+#endif
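For illustration (names hypothetical), a machine provides a struct sys_timer whose init hook programs the tick source and claims its interrupt with setup_irq(), and points machine_desc.timer at it:

	static void __init myboard_timer_init(void)
	{
		/* program the hardware for HZ ticks, then claim the interrupt */
		setup_irq(IRQ_MYBOARD_TIMER, &myboard_timer_irqaction);	/* both hypothetical */
	}

	struct sys_timer myboard_timer = {
		.init	= myboard_timer_init,
	};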
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
new file mode 100644
index 0000000..270902c
--- /dev/null
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -0,0 +1,29 @@
+/*
+ * arch/arm/include/asm/mach/udc_pxa2xx.h
+ *
+ * This supports machine-specific differences in how the PXA2xx
+ * USB Device Controller (UDC) is wired.
+ *
+ * It is set in linux/arch/arm/mach-pxa/<machine>.c or in
+ * linux/arch/arm/mach-ixp4xx/<machine>.c and used in
+ * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c
+ */
+
+struct pxa2xx_udc_mach_info {
+        int  (*udc_is_connected)(void);		/* do we see host? */
+        void (*udc_command)(int cmd);
+#define	PXA2XX_UDC_CMD_CONNECT		0	/* let host see us */
+#define	PXA2XX_UDC_CMD_DISCONNECT	1	/* so host won't see us */
+
+	/* Boards following the design guidelines in the developer's manual,
+	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
+	 * VBUS IRQ and omit the methods above.  Store the GPIO number
+	 * here; for GPIO 0, also mask in one of the pxa_gpio_mode() bits.
+	 * Note that sometimes the signals go through inverters...
+	 */
+	bool	gpio_vbus_inverted;
+	u16	gpio_vbus;			/* high == vbus present */
+	bool	gpio_pullup_inverted;
+	u16	gpio_pullup;			/* high == pullup activated */
+};
+
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
new file mode 100644
index 0000000..7b81e0c4
--- /dev/null
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -0,0 +1,28 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+#include <asm/arch/irqs.h>
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x)	(0x70 + (x))
+#define RTC_ALWAYS_BCD	1	/* RTC operates in binary mode */
+#endif
+
+/*
+ * All machines supported so far access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+#endif /* _ASM_MC146818RTC_H */
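The two macros implement the classic index/data access pattern; a usage sketch (the register constants come from linux/mc146818rtc.h):

	unsigned char sec, ctrl;

	sec  = CMOS_READ(RTC_SECONDS);			/* outb index to 0x70, inb data from 0x71 */
	ctrl = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(ctrl | RTC_SET, RTC_CONTROL);	/* outb index, then outb the new value */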
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
new file mode 100644
index 0000000..9206922
--- /dev/null
+++ b/arch/arm/include/asm/memory.h
@@ -0,0 +1,334 @@
+/*
+ *  arch/arm/include/asm/memory.h
+ *
+ *  Copyright (C) 2000-2002 Russell King
+ *  modification for nommu, Hyok S. Choi, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Note: this file should not be included by non-asm/.h files
+ */
+#ifndef __ASM_ARM_MEMORY_H
+#define __ASM_ARM_MEMORY_H
+
+/*
+ * Allow for constants defined here to be used from assembly code
+ * by prepending the UL suffix only with actual C code compilation.
+ */
+#ifndef __ASSEMBLY__
+#define UL(x) (x##UL)
+#else
+#define UL(x) (x)
+#endif
+
+#include <linux/compiler.h>
+#include <asm/arch/memory.h>
+#include <asm/sizes.h>
+
+#ifdef CONFIG_MMU
+
+#ifndef TASK_SIZE
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#define TASK_SIZE		UL(0xbf000000)
+#define TASK_UNMAPPED_BASE	UL(0x40000000)
+#endif
+
+/*
+ * The maximum size of a 26-bit user space task.
+ */
+#define TASK_SIZE_26		UL(0x04000000)
+
+/*
+ * Page offset: 3GB
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET		UL(0xc0000000)
+#endif
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULE_END		(PAGE_OFFSET)
+#define MODULE_START		(MODULE_END - 16*1048576)
+
+#if TASK_SIZE > MODULE_START
+#error Top of user space clashes with start of module space
+#endif
+
+/*
+ * The XIP kernel gets mapped at the bottom of the module vm area.
+ * Since we use sections to map it, this macro replaces the physical address
+ * with its virtual address while keeping offset from the base section.
+ */
+#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
+
+/*
+ * Allow 16MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER	24
+
+#else /* CONFIG_MMU */
+
+/*
+ * Without an MMU the user task size is limited only by the end of the free
+ * RAM region, so this define can no longer carry its original meaning.
+ * Fortunately, nothing references it in noMMU mode, for now.
+ */
+#ifndef TASK_SIZE
+#define TASK_SIZE		(CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef TASK_UNMAPPED_BASE
+#define TASK_UNMAPPED_BASE	UL(0x00000000)
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 		(CONFIG_DRAM_BASE)
+#endif
+
+#ifndef END_MEM
+#define END_MEM     		(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET		(PHYS_OFFSET)
+#endif
+
+/*
+ * The module can be at any place in ram in nommu mode.
+ */
+#define MODULE_END		(END_MEM)
+#define MODULE_START		(PHYS_OFFSET)
+
+#endif /* !CONFIG_MMU */
+
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE SZ_2M
+#endif
+
+/*
+ * Physical vs virtual RAM address space conversion.  These are
+ * private definitions which should NOT be used outside memory.h
+ * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
+#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA.  The default here places no restriction on DMA
+ * allocations.  This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+#ifndef ISA_DMA_THRESHOLD
+#define ISA_DMA_THRESHOLD	(0xffffffffULL)
+#endif
+
+#ifndef arch_adjust_zones
+#define arch_adjust_zones(node,size,holes) do { } while (0)
+#endif
+
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view.  We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * These are *only* valid on the kernel direct mapped RAM memory.
+ * Note: Drivers should NOT use these.  They are the wrong
+ * translation for translating DMA addresses.  Use the driver
+ * DMA support - see dma-mapping.h.
+ */
+static inline unsigned long virt_to_phys(void *x)
+{
+	return __virt_to_phys((unsigned long)(x));
+}
+
+static inline void *phys_to_virt(unsigned long x)
+{
+	return (void *)(__phys_to_virt((unsigned long)(x)));
+}
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x)			__virt_to_phys((unsigned long)(x))
+#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+/*
+ * Virtual <-> DMA view memory address translations
+ * Again, these are *only* valid on the kernel direct mapped RAM
+ * memory.  Use of these is *deprecated* (and that doesn't mean
+ * use the __ prefixed forms instead.)  See dma-mapping.h.
+ */
+static inline __deprecated unsigned long virt_to_bus(void *x)
+{
+	return __virt_to_bus((unsigned long)x);
+}
+
+static inline __deprecated void *bus_to_virt(unsigned long x)
+{
+	return (void *)__bus_to_virt(x);
+}
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE().  It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ *  page_to_pfn(page)	convert a struct page * to a PFN number
+ *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
+ *  pfn_valid(pfn)	indicates whether a PFN number is valid
+ *
+ *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
+ *  virt_addr_valid(k)	indicates whether a virtual address is valid
+ */
+#ifndef CONFIG_DISCONTIGMEM
+
+#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+
+#ifndef CONFIG_SPARSEMEM
+#define pfn_valid(pfn)		((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
+#endif
+
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+
+#define PHYS_TO_NID(addr)	(0)
+
+#else /* CONFIG_DISCONTIGMEM */
+
+/*
+ * This is more complex.  We have a set of mem_map arrays spread
+ * around in memory.
+ */
+#include <linux/numa.h>
+
+#define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
+#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
+
+#define pfn_valid(pfn)						\
+	({							\
+		unsigned int nid = PFN_TO_NID(pfn);		\
+		int valid = nid < MAX_NUMNODES;			\
+		if (valid) {					\
+			pg_data_t *node = NODE_DATA(nid);	\
+			valid = (pfn - node->node_start_pfn) <	\
+				node->node_spanned_pages;	\
+		}						\
+		valid;						\
+	})
+
+#define virt_to_page(kaddr)					\
+	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
+
+#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)
+
+/*
+ * Common discontigmem stuff.
+ *  PHYS_TO_NID is used by the ARM kernel/setup.c
+ */
+#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)
+
+/*
+ * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
+ * and returns the mem_map of that node.
+ */
+#define ADDR_TO_MAPBASE(kaddr)	NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
+
+/*
+ * Given a page frame number, find the owning node of the memory
+ * and returns the mem_map of that node.
+ */
+#define PFN_TO_MAPBASE(pfn)	NODE_MEM_MAP(PFN_TO_NID(pfn))
+
+#ifdef NODE_MEM_SIZE_BITS
+#define NODE_MEM_SIZE_MASK	((1 << NODE_MEM_SIZE_BITS) - 1)
+
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define KVADDR_TO_NID(addr) \
+	(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MEM_SIZE_BITS)
+
+/*
+ * Given a page frame number, convert it to a node id.
+ */
+#define PFN_TO_NID(pfn) \
+	(((pfn) - PHYS_PFN_OFFSET) >> (NODE_MEM_SIZE_BITS - PAGE_SHIFT))
+
+/*
+ * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
+ * and returns the index corresponding to the appropriate page in the
+ * node's mem_map.
+ */
+#define LOCAL_MAP_NR(addr) \
+	(((unsigned long)(addr) & NODE_MEM_SIZE_MASK) >> PAGE_SHIFT)
+
+#endif /* NODE_MEM_SIZE_BITS */
+
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
+ */
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * Optional device DMA address remapping. Do _not_ use directly!
+ * We should really eliminate virt_to_bus() here - it's deprecated.
+ */
+#ifndef __arch_page_to_dma
+#define page_to_dma(dev, page)		((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
+#define dma_to_virt(dev, addr)		((void *)__bus_to_virt(addr))
+#define virt_to_dma(dev, addr)		((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
+#else
+#define page_to_dma(dev, page)		(__arch_page_to_dma(dev, page))
+#define dma_to_virt(dev, addr)		(__arch_dma_to_virt(dev, addr))
+#define virt_to_dma(dev, addr)		(__arch_virt_to_dma(dev, addr))
+#endif
+
+/*
+ * Optional coherency support.  Currently used only by selected
+ * Intel XSC3-based systems.
+ */
+#ifndef arch_is_coherent
+#define arch_is_coherent()		0
+#endif
+
+#endif
+
+#include <asm-generic/memory_model.h>
+
+#endif
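To make the direct-map arithmetic concrete, a sketch assuming PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0x10000000 (both are platform-dependent):

	void *vaddr = kmalloc(64, GFP_KERNEL);		/* lowmem, direct-mapped */
	unsigned long paddr = virt_to_phys(vaddr);

	/* e.g. vaddr == 0xc0123000  ->  paddr == 0xc0123000 - 0xc0000000 + 0x10000000
	 *                                     == 0x10123000
	 * and phys_to_virt(paddr) round-trips back to vaddr.
	 */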
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 0000000..54570d2
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,17 @@
+#ifndef __ARM_MMAN_H__
+#define __ARM_MMAN_H__
+
+#include <asm-generic/mman.h>
+
+#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
+#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
+#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
+#define MAP_LOCKED	0x2000		/* pages are locked */
+#define MAP_NORESERVE	0x4000		/* don't check for reservations */
+#define MAP_POPULATE	0x8000		/* populate (prefault) page tables */
+#define MAP_NONBLOCK	0x10000		/* do not block on IO */
+
+#define MCL_CURRENT	1		/* lock all current mappings */
+#define MCL_FUTURE	2		/* lock all future mappings */
+
+#endif /* __ARM_MMAN_H__ */
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
new file mode 100644
index 0000000..53099d4
--- /dev/null
+++ b/arch/arm/include/asm/mmu.h
@@ -0,0 +1,33 @@
+#ifndef __ARM_MMU_H
+#define __ARM_MMU_H
+
+#ifdef CONFIG_MMU
+
+typedef struct {
+#ifdef CONFIG_CPU_HAS_ASID
+	unsigned int id;
+#endif
+	unsigned int kvm_seq;
+} mm_context_t;
+
+#ifdef CONFIG_CPU_HAS_ASID
+#define ASID(mm)	((mm)->context.id & 255)
+#else
+#define ASID(mm)	(0)
+#endif
+
+#else
+
+/*
+ * From nommu.h:
+ *  Copyright (C) 2002, David McCullough <davidm@snapgear.com>
+ *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
+ */
+typedef struct {
+	struct vm_list_struct	*vmlist;
+	unsigned long		end_brk;
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
new file mode 100644
index 0000000..a301e44
--- /dev/null
+++ b/arch/arm/include/asm/mmu_context.h
@@ -0,0 +1,117 @@
+/*
+ *  arch/arm/include/asm/mmu_context.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Changelog:
+ *   27-06-1996	RMK	Created
+ */
+#ifndef __ASM_ARM_MMU_CONTEXT_H
+#define __ASM_ARM_MMU_CONTEXT_H
+
+#include <linux/compiler.h>
+#include <asm/cacheflush.h>
+#include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
+
+void __check_kvm_seq(struct mm_struct *mm);
+
+#ifdef CONFIG_CPU_HAS_ASID
+
+/*
+ * On ARMv6, we have the following structure in the Context ID:
+ *
+ * 31                         7          0
+ * +-------------------------+-----------+
+ * |      process ID         |   ASID    |
+ * +-------------------------+-----------+
+ * |              context ID             |
+ * +-------------------------------------+
+ *
+ * The ASID is used to tag entries in the CPU caches and TLBs.
+ * The context ID is used by debuggers and trace logic, and
+ * should be unique within all running processes.
+ */
+#define ASID_BITS		8
+#define ASID_MASK		((~0) << ASID_BITS)
+#define ASID_FIRST_VERSION	(1 << ASID_BITS)
+
+extern unsigned int cpu_last_asid;
+
+void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void __new_context(struct mm_struct *mm);
+
+static inline void check_context(struct mm_struct *mm)
+{
+	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		__new_context(mm);
+
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+}
+
+#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
+
+#else
+
+static inline void check_context(struct mm_struct *mm)
+{
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+}
+
+#define init_new_context(tsk,mm)	0
+
+#endif
+
+#define destroy_context(mm)		do { } while(0)
+
+/*
+ * This is called when "tsk" is about to enter lazy TLB mode.
+ *
+ * mm:  describes the currently active mm context
+ * tsk: task which is entering lazy tlb
+ * cpu: cpu number which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned.  No registers are touched.  We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+#ifdef CONFIG_MMU
+	unsigned int cpu = smp_processor_id();
+
+#ifdef CONFIG_SMP
+	/* check for possible thread migration */
+	if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+		__flush_icache_all();
+#endif
+	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+		check_context(next);
+		cpu_switch_mm(next->pgd, next);
+		if (cache_is_vivt())
+			cpu_clear(cpu, prev->cpu_vm_mask);
+	}
+#endif
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+
+#endif
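A worked example of the generation check in check_context(), assuming ASID_BITS = 8 as defined above:

	/* mm->context.id  = 0x00000204   (generation 2, ASID 0x04)
	 * cpu_last_asid   = 0x00000310   (generation 3)
	 * (0x204 ^ 0x310) >> 8 == 0x1, i.e. non-zero, so __new_context(mm)
	 * is called to move the mm into the current ASID generation.
	 */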
diff --git a/arch/arm/include/asm/mmzone.h b/arch/arm/include/asm/mmzone.h
new file mode 100644
index 0000000..f2fbb50
--- /dev/null
+++ b/arch/arm/include/asm/mmzone.h
@@ -0,0 +1,30 @@
+/*
+ *  arch/arm/include/asm/mmzone.h
+ *
+ *  1999-12-29	Nicolas Pitre		Created
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+/*
+ * Currently defined in arch/arm/mm/discontig.c
+ */
+extern pg_data_t discontig_node_data[];
+
+/*
+ * Return a pointer to the node data for node n.
+ */
+#define NODE_DATA(nid)		(&discontig_node_data[nid])
+
+/*
+ * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
+ */
+#define NODE_MEM_MAP(nid)	(NODE_DATA(nid)->node_mem_map)
+
+#include <asm/arch/memory.h>
+
+#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
new file mode 100644
index 0000000..24b168d
--- /dev/null
+++ b/arch/arm/include/asm/module.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_ARM_MODULE_H
+#define _ASM_ARM_MODULE_H
+
+struct mod_arch_specific
+{
+	int foo;
+};
+
+#define Elf_Shdr	Elf32_Shdr
+#define Elf_Sym		Elf32_Sym
+#define Elf_Ehdr	Elf32_Ehdr
+
+/*
+ * Include the ARM architecture version.
+ */
+#define MODULE_ARCH_VERMAGIC	"ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+#endif /* _ASM_ARM_MODULE_H */
diff --git a/arch/arm/include/asm/msgbuf.h b/arch/arm/include/asm/msgbuf.h
new file mode 100644
index 0000000..33b35b9
--- /dev/null
+++ b/arch/arm/include/asm/msgbuf.h
@@ -0,0 +1,31 @@
+#ifndef _ASMARM_MSGBUF_H
+#define _ASMARM_MSGBUF_H
+
+/* 
+ * The msqid64_ds structure for arm architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	unsigned long	__unused1;
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	unsigned long	__unused2;
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long	__unused3;
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+#endif /* _ASMARM_MSGBUF_H */
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
new file mode 100644
index 0000000..9eb127c
--- /dev/null
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -0,0 +1,26 @@
+/*
+ * MTD primitives for XIP support. Architecture specific functions
+ *
+ * Do not include this file directly. It's included from linux/mtd/xip.h
+ * 
+ * Author:	Nicolas Pitre
+ * Created:	Nov 2, 2004
+ * Copyright:	(C) 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $
+ */
+
+#ifndef __ARM_MTD_XIP_H__
+#define __ARM_MTD_XIP_H__
+
+#include <asm/hardware.h>
+#include <asm/arch/mtd-xip.h>
+
+/* fill instruction prefetch */
+#define xip_iprefetch() 	do { asm volatile (".rep 8; nop; .endr"); } while (0)
+
+#endif /* __ARM_MTD_XIP_H__ */
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
new file mode 100644
index 0000000..93226cf
--- /dev/null
+++ b/arch/arm/include/asm/mutex.h
@@ -0,0 +1,127 @@
+/*
+ * arch/arm/include/asm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap).  The idea is to attempt
+ * decrementing the lock value only once.  If, once decremented, it isn't zero,
+ * or if its store-back fails due to a dispute on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		fail_fn(count);
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		__res = fail_fn(count);
+	return __res;
+}
+
+/*
+ * Same trick is used for the unlock fast path. However the original value,
+ * rather than the result, is used to test for success in order to have
+ * better generated assembly.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"ldrex	%0, [%3]	\n\t"
+		"add	%1, %0, #1	\n\t"
+		"strex	%2, %1, [%3]	"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__orig |= __ex_flag;
+	if (unlikely(__orig != 0))
+		fail_fn(count);
+}
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"1: ldrex	%0, [%3]	\n\t"
+		"subs		%1, %0, #1	\n\t"
+		"strexeq	%2, %1, [%3]	\n\t"
+		"movlt		%0, #0		\n\t"
+		"cmpeq		%2, #0		\n\t"
+		"bgt		1b		"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&count->counter)
+		: "cc", "memory" );
+
+	return __orig;
+}
+
+#endif
+#endif
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
new file mode 100644
index 0000000..04e5a55
--- /dev/null
+++ b/arch/arm/include/asm/nwflash.h
@@ -0,0 +1,9 @@
+#ifndef _FLASH_H
+#define _FLASH_H
+
+#define FLASH_MINOR		 160	/* MAJOR is 10 - miscdevice */
+#define CMD_WRITE_DISABLE	 0
+#define CMD_WRITE_ENABLE	 0x28
+#define CMD_WRITE_BASE64K_ENABLE 0x47
+
+#endif /* _FLASH_H */
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
new file mode 100644
index 0000000..3574c0d
--- /dev/null
+++ b/arch/arm/include/asm/page-nommu.h
@@ -0,0 +1,49 @@
+/*
+ *  arch/arm/include/asm/page-nommu.h
+ *
+ *  Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASMARM_PAGE_NOMMU_H
+#define _ASMARM_PAGE_NOMMU_H
+
+#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
+#define KTHREAD_SIZE (8192)
+#else
+#define KTHREAD_SIZE PAGE_SIZE
+#endif
+ 
+#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)	free_page(addr)
+
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)      (x)
+#define pmd_val(x)      (x)
+#define pgd_val(x)	((x)[0])
+#define pgprot_val(x)   (x)
+
+#define __pte(x)        (x)
+#define __pmd(x)        (x)
+#define __pgprot(x)     (x)
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
new file mode 100644
index 0000000..cf2e268
--- /dev/null
+++ b/arch/arm/include/asm/page.h
@@ -0,0 +1,199 @@
+/*
+ *  arch/arm/include/asm/page.h
+ *
+ *  Copyright (C) 1995-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PAGE_H
+#define _ASMARM_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT		12
+#define PAGE_SIZE		(1UL << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MMU
+
+#include "page-nommu.h"
+
+#else
+
+#include <asm/glue.h>
+
+/*
+ *	User Space Model
+ *	================
+ *
+ *	This section selects the correct set of functions for dealing with
+ *	page-based copying and clearing for user space for the particular
+ *	processor(s) we're building for.
+ *
+ *	We have the following to choose from:
+ *	  v3		- ARMv3
+ *	  v4wt		- ARMv4 with writethrough cache, without minicache
+ *	  v4wb		- ARMv4 with writeback cache, without minicache
+ *	  v4_mc		- ARMv4 with minicache
+ *	  xscale	- Xscale
+ *	  xsc3		- XScalev3
+ */
+#undef _USER
+#undef MULTI_USER
+
+#ifdef CONFIG_CPU_COPY_V3
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v3
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WT
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wt
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WB
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wb
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_FEROCEON
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER feroceon
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER xscale_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER xsc3_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V6
+# define MULTI_USER 1
+#endif
+
+#if !defined(_USER) && !defined(MULTI_USER)
+#error Unknown user operations model
+#endif
+
+struct cpu_user_fns {
+	void (*cpu_clear_user_page)(void *p, unsigned long user);
+	void (*cpu_copy_user_page)(void *to, const void *from,
+				   unsigned long user);
+};
+
+#ifdef MULTI_USER
+extern struct cpu_user_fns cpu_user;
+
+#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
+#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page
+
+#else
+
+#define __cpu_clear_user_page	__glue(_USER,_clear_user_page)
+#define __cpu_copy_user_page	__glue(_USER,_copy_user_page)
+
+extern void __cpu_clear_user_page(void *p, unsigned long user);
+extern void __cpu_copy_user_page(void *to, const void *from,
+				 unsigned long user);
+#endif
+
+#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
+#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
+
+#define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd[2]; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)      ((x).pte)
+#define pmd_val(x)      ((x).pmd)
+#define pgd_val(x)	((x).pgd[0])
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) } )
+#define __pmd(x)        ((pmd_t) { (x) } )
+#define __pgprot(x)     ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)      (x)
+#define pmd_val(x)      (x)
+#define pgd_val(x)	((x)[0])
+#define pgprot_val(x)   (x)
+
+#define __pte(x)        (x)
+#define __pmd(x)        (x)
+#define __pgprot(x)     (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* CONFIG_MMU */
+
+typedef struct page *pgtable_t;
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
+#include <asm-generic/page.h>
+
+#endif
diff --git a/arch/arm/include/asm/param.h b/arch/arm/include/asm/param.h
new file mode 100644
index 0000000..8b24bf94
--- /dev/null
+++ b/arch/arm/include/asm/param.h
@@ -0,0 +1,31 @@
+/*
+ *  arch/arm/include/asm/param.h
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PARAM_H
+#define __ASM_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
+# define USER_HZ	100		/* User interfaces are in "ticks" */
+# define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
+#else
+# define HZ		100
+#endif
+
+#define EXEC_PAGESIZE	4096
+
+#ifndef NOGROUP
+#define NOGROUP         (-1)
+#endif
+
+/* max length of hostname */
+#define MAXHOSTNAMELEN  64
+
+#endif
+
diff --git a/arch/arm/include/asm/parport.h b/arch/arm/include/asm/parport.h
new file mode 100644
index 0000000..26e94b0
--- /dev/null
+++ b/arch/arm/include/asm/parport.h
@@ -0,0 +1,18 @@
+/*
+ *  arch/arm/include/asm/parport.h: ARM-specific parport initialisation
+ *
+ *  Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef __ASMARM_PARPORT_H
+#define __ASMARM_PARPORT_H
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+	return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASMARM_PARPORT_H) */
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
new file mode 100644
index 0000000..2d84792
--- /dev/null
+++ b/arch/arm/include/asm/pci.h
@@ -0,0 +1,91 @@
+#ifndef ASMARM_PCI_H
+#define ASMARM_PCI_H
+
+#ifdef __KERNEL__
+#include <asm-generic/pci-dma-compat.h>
+
+#include <asm/hardware.h> /* for PCIBIOS_MIN_* */
+
+#define pcibios_scan_all_fns(a, b)	0
+
+#ifdef CONFIG_PCI_HOST_ITE8152
+/* ITE bridge requires setting the latency timer to avoid early bus access
+   termination by PCI bus master devices
+*/
+extern void pcibios_set_master(struct pci_dev *dev);
+#else
+static inline void pcibios_set_master(struct pci_dev *dev)
+{
+	/* No special bus mastering setup handling */
+}
+#endif
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+	/* We don't do dynamic PCI IRQ allocation */
+}
+
+/*
+ * The PCI address space does equal the physical memory address space.
+ * The networking and block device layers use this boolean for bounce
+ * buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS     (0)
+
+/*
+ * Whether pci_unmap_{single,page} is a nop depends upon the
+ * configuration.
+ */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+					enum pci_dma_burst_strategy *strat,
+					unsigned long *strategy_parameter)
+{
+	*strat = PCI_DMA_BURST_INFINITY;
+	*strategy_parameter = ~0UL;
+}
+#endif
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                               enum pci_mmap_state mmap_state, int write_combine);
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+			 struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+	struct resource *root = NULL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+
+	return root;
+}
+
+/*
+ * Dummy implementation; always return 0.
+ */
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	return 0;
+}
+
+#endif /* __KERNEL__ */
+ 
+#endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 0000000..b4e32d8
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ARM_PERCPU
+#define __ARM_PERCPU
+
+#include <asm-generic/percpu.h>
+
+#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
new file mode 100644
index 0000000..3dcd64b
--- /dev/null
+++ b/arch/arm/include/asm/pgalloc.h
@@ -0,0 +1,136 @@
+/*
+ *  arch/arm/include/asm/pgalloc.h
+ *
+ *  Copyright (C) 2000-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGALLOC_H
+#define _ASMARM_PGALLOC_H
+
+#include <asm/domain.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#define check_pgt_cache()		do { } while (0)
+
+#ifdef CONFIG_MMU
+
+#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
+#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
+
+/*
+ * Since we have only two-level page tables, these are trivial
+ */
+#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, pmd)		do { } while (0)
+#define pgd_populate(mm,pmd,pte)	BUG()
+
+extern pgd_t *get_pgd_slow(struct mm_struct *mm);
+extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
+
+#define pgd_alloc(mm)			get_pgd_slow(mm)
+#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
+
+/*
+ * Allocate one PTE table.
+ *
+ * This actually allocates two hardware PTE tables, but we wrap this up
+ * into one table thus:
+ *
+ *  +------------+
+ *  |  h/w pt 0  |
+ *  +------------+
+ *  |  h/w pt 1  |
+ *  +------------+
+ *  | Linux pt 0 |
+ *  +------------+
+ *  | Linux pt 1 |
+ *  +------------+
+ */
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *pte;
+
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	if (pte) {
+		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
+		pte += PTRS_PER_PTE;
+	}
+
+	return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	struct page *pte;
+
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	if (pte) {
+		void *page = page_address(pte);
+		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
+		pgtable_page_ctor(pte);
+	}
+
+	return pte;
+}
+
+/*
+ * Free one PTE table.
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	if (pte) {
+		pte -= PTRS_PER_PTE;
+		free_page((unsigned long)pte);
+	}
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+	pgtable_page_dtor(pte);
+	__free_page(pte);
+}
+
+static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+{
+	pmdp[0] = __pmd(pmdval);
+	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+	flush_pmd_entry(pmdp);
+}
+
+/*
+ * Populate the pmdp entry with a pointer to the pte.  This pmd is part
+ * of the mm address space.
+ *
+ * Ensure that we always set both PMD entries.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+	unsigned long pte_ptr = (unsigned long)ptep;
+
+	/*
+	 * The pmd must be loaded with the physical
+	 * address of the PTE table
+	 */
+	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
+	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+{
+	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#endif /* CONFIG_MMU */
+
+#endif
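A worked sketch of the pointer arithmetic above, assuming 4-byte table entries and PTRS_PER_PTE == 512:

	/* One 4K page, as laid out by pte_alloc_one_kernel():
	 *   page + 0x000  h/w pt 0   (256 entries)
	 *   page + 0x400  h/w pt 1   (256 entries)
	 *   page + 0x800  Linux pts  (returned pointer: pte + PTRS_PER_PTE)
	 *
	 * pmd_populate_kernel() undoes the offset before programming the PMD:
	 *   pte_ptr -= PTRS_PER_PTE * sizeof(void *);   i.e. back by 0x800 bytes
	 * so pmdp[0]/pmdp[1] point at the two hardware tables, 1K apart.
	 */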
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
new file mode 100644
index 0000000..fd1521d
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -0,0 +1,90 @@
+/*
+ *  arch/arm/include/asm/pgtable-hwdef.h
+ *
+ *  Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_HWDEF_H
+#define _ASMARM_PGTABLE_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ *   - common
+ */
+#define PMD_TYPE_MASK		(3 << 0)
+#define PMD_TYPE_FAULT		(0 << 0)
+#define PMD_TYPE_TABLE		(1 << 0)
+#define PMD_TYPE_SECT		(2 << 0)
+#define PMD_BIT4		(1 << 4)
+#define PMD_DOMAIN(x)		((x) << 5)
+#define PMD_PROTECTION		(1 << 9)	/* v5 */
+/*
+ *   - section
+ */
+#define PMD_SECT_BUFFERABLE	(1 << 2)
+#define PMD_SECT_CACHEABLE	(1 << 3)
+#define PMD_SECT_XN		(1 << 4)	/* v6 */
+#define PMD_SECT_AP_WRITE	(1 << 10)
+#define PMD_SECT_AP_READ	(1 << 11)
+#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */
+#define PMD_SECT_APX		(1 << 15)	/* v6 */
+#define PMD_SECT_S		(1 << 16)	/* v6 */
+#define PMD_SECT_nG		(1 << 17)	/* v6 */
+#define PMD_SECT_SUPER		(1 << 18)	/* v6 */
+
+#define PMD_SECT_UNCACHED	(0)
+#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
+#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
+#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
+#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2))
+
+/*
+ *   - coarse table (not used)
+ */
+
+/*
+ * + Level 2 descriptor (PTE)
+ *   - common
+ */
+#define PTE_TYPE_MASK		(3 << 0)
+#define PTE_TYPE_FAULT		(0 << 0)
+#define PTE_TYPE_LARGE		(1 << 0)
+#define PTE_TYPE_SMALL		(2 << 0)
+#define PTE_TYPE_EXT		(3 << 0)	/* v5 */
+#define PTE_BUFFERABLE		(1 << 2)
+#define PTE_CACHEABLE		(1 << 3)
+
+/*
+ *   - extended small page/tiny page
+ */
+#define PTE_EXT_XN		(1 << 0)	/* v6 */
+#define PTE_EXT_AP_MASK		(3 << 4)
+#define PTE_EXT_AP0		(1 << 4)
+#define PTE_EXT_AP1		(2 << 4)
+#define PTE_EXT_AP_UNO_SRO	(0 << 4)
+#define PTE_EXT_AP_UNO_SRW	(PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW	(PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW	(PTE_EXT_AP1|PTE_EXT_AP0)
+#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */
+#define PTE_EXT_APX		(1 << 9)	/* v6 */
+#define PTE_EXT_COHERENT	(1 << 9)	/* XScale3 */
+#define PTE_EXT_SHARED		(1 << 10)	/* v6 */
+#define PTE_EXT_NG		(1 << 11)	/* v6 */
+
+/*
+ *   - small page
+ */
+#define PTE_SMALL_AP_MASK	(0xff << 4)
+#define PTE_SMALL_AP_UNO_SRO	(0x00 << 4)
+#define PTE_SMALL_AP_UNO_SRW	(0x55 << 4)
+#define PTE_SMALL_AP_URO_SRW	(0xaa << 4)
+#define PTE_SMALL_AP_URW_SRW	(0xff << 4)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
new file mode 100644
index 0000000..b011f2e
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -0,0 +1,118 @@
+/*
+ *  arch/arm/include/asm/pgtable-nommu.h
+ *
+ *  Copyright (C) 1995-2002 Russell King
+ *  Copyright (C) 2004  Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_NOMMU_H
+#define _ASMARM_PGTABLE_NOMMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * Trivial page table functions.
+ */
+#define pgd_present(pgd)	(1)
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr)	(1)
+#define	pmd_offset(a, b)	((void *)0)
+/* FIXME */
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT		21
+
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+/* FIXME */
+
+#define PAGE_NONE	__pgprot(0)
+#define PAGE_SHARED	__pgprot(0)
+#define PAGE_COPY	__pgprot(0)
+#define PAGE_READONLY	__pgprot(0)
+#define PAGE_KERNEL	__pgprot(0)
+
+#define swapper_pg_dir ((pgd_t *) 0)
+
+#define __swp_type(x)		(0)
+#define __swp_offset(x)		(0)
+#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+
+
+typedef pte_t *pte_addr_t;
+
+static inline int pte_file(pte_t pte) { return 0; }
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr)	(virt_to_page(0))
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot)	__pgprot(0)
+#define pgprot_writecombine(prot) __pgprot(0)
+
+
+/*
+ * These would be in other places but having them here reduces the diffs.
+ */
+extern unsigned int kobjsize(const void *objp);
+
+/*
+ * No page table caches to initialise.
+ */
+#define pgtable_cache_init()	do { } while (0)
+#define io_remap_page_range	remap_page_range
+#define io_remap_pfn_range	remap_pfn_range
+
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define	VMALLOC_START	0
+#define	VMALLOC_END	0xffffffff
+
+#define FIRST_USER_ADDRESS      (0)
+
+#include <asm-generic/pgtable.h>
+
+#else
+
+/*
+ * dummy tlb and user structures.
+ */
+#define v3_tlb_fns	(0)
+#define v4_tlb_fns	(0)
+#define v4wb_tlb_fns	(0)
+#define v4wbi_tlb_fns	(0)
+#define v6wbi_tlb_fns	(0)
+#define v7wbi_tlb_fns	(0)
+
+#define v3_user_fns	(0)
+#define v4_user_fns	(0)
+#define v4_mc_user_fns	(0)
+#define v4wb_user_fns	(0)
+#define v4wt_user_fns	(0)
+#define v6_user_fns	(0)
+#define xscale_mc_user_fns (0)
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASMARM_PGTABLE_NOMMU_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
new file mode 100644
index 0000000..8ab060a
--- /dev/null
+++ b/arch/arm/include/asm/pgtable.h
@@ -0,0 +1,401 @@
+/*
+ *  arch/arm/include/asm/pgtable.h
+ *
+ *  Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_H
+#define _ASMARM_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+#include <asm/proc-fns.h>
+
+#ifndef CONFIG_MMU
+
+#include "pgtable-nommu.h"
+
+#else
+
+#include <asm/memory.h>
+#include <asm/arch/vmalloc.h>
+#include <asm/pgtable-hwdef.h>
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET		(8*1024*1024)
+#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#endif
+
+/*
+ * Hardware-wise, we have a two level page table structure, where the first
+ * level has 4096 entries, and the second level has 256 entries.  Each entry
+ * is one 32-bit word.  Most of the bits in the second level entry are used
+ * by hardware, and there aren't any "accessed" and "dirty" bits.
+ *
+ * Linux on the other hand has a three level page table structure, which can
+ * be wrapped to fit a two level page table structure easily - using the PGD
+ * and PTE only.  However, Linux also expects one "PTE" table per page, and
+ * at least a "dirty" bit.
+ *
+ * Therefore, we tweak the implementation slightly - we tell Linux that we
+ * have 2048 entries in the first level, each of which is 8 bytes (iow, two
+ * hardware pointers to the second level.)  The second level contains two
+ * hardware PTE tables arranged contiguously, followed by Linux versions
+ * which contain the state information Linux needs.  We, therefore, end up
+ * with 512 entries in the "PTE" level.
+ *
+ * This leads to the page tables having the following layout:
+ *
+ *    pgd             pte
+ * |        |
+ * +--------+ +0
+ * |        |-----> +------------+ +0
+ * +- - - - + +4    |  h/w pt 0  |
+ * |        |-----> +------------+ +1024
+ * +--------+ +8    |  h/w pt 1  |
+ * |        |       +------------+ +2048
+ * +- - - - +       | Linux pt 0 |
+ * |        |       +------------+ +3072
+ * +--------+       | Linux pt 1 |
+ * |        |       +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set.  Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE.  Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the translation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE		512
+#define PTRS_PER_PMD		1
+#define PTRS_PER_PGD		2048
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT		21
+#define PGDIR_SHIFT		21
+
+#define LIBRARY_TEXT_START	0x0c000000
+
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, unsigned long val);
+extern void __pmd_error(const char *file, int line, unsigned long val);
+extern void __pgd_error(const char *file, int line, unsigned long val);
+
+#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#endif /* !__ASSEMBLY__ */
+
+#define PMD_SIZE		(1UL << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at.  This is particularly important for
+ * non-high vector CPUs.
+ */
+#define FIRST_USER_ADDRESS	PAGE_SIZE
+
+#define FIRST_USER_PGD_NR	1
+#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT		20
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT	24
+#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
+ */
+#define L_PTE_PRESENT		(1 << 0)
+#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
+#define L_PTE_YOUNG		(1 << 1)
+#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
+#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
+#define L_PTE_USER		(1 << 4)
+#define L_PTE_WRITE		(1 << 5)
+#define L_PTE_EXEC		(1 << 6)
+#define L_PTE_DIRTY		(1 << 7)
+#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The pgprot_* and protection_map entries will be fixed up at runtime
+ * to include the cacheable and bufferable bits based on memory policy,
+ * as well as any architecture dependent bits like global/ASID and SMP
+ * shared mapping bits.
+ */
+#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC
+
+extern pgprot_t		pgprot_user;
+extern pgprot_t		pgprot_kernel;
+
+#define PAGE_NONE	pgprot_user
+#define PAGE_COPY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
+#define PAGE_SHARED	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
+				 L_PTE_WRITE)
+#define PAGE_READONLY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
+#define PAGE_KERNEL	pgprot_kernel
+
+#define __PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
+#define __PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+#define __PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
+#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version.  These get translated into the best that the
+ * architecture can perform.  Note that on most ARM hardware:
+ *  1) We cannot do execute protection
+ *  2) If we could do execute protection, then read is implied
+ *  3) write implies read permissions
+ */
+#define __P000  __PAGE_NONE
+#define __P001  __PAGE_READONLY
+#define __P010  __PAGE_COPY
+#define __P011  __PAGE_COPY
+#define __P100  __PAGE_READONLY
+#define __P101  __PAGE_READONLY
+#define __P110  __PAGE_COPY
+#define __P111  __PAGE_COPY
+
+#define __S000  __PAGE_NONE
+#define __S001  __PAGE_READONLY
+#define __S010  __PAGE_SHARED
+#define __S011  __PAGE_SHARED
+#define __S100  __PAGE_READONLY
+#define __S101  __PAGE_READONLY
+#define __S110  __PAGE_SHARED
+#define __S111  __PAGE_SHARED
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr)	(empty_zero_page)
+
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
+#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
+#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)	do { } while (0)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#define set_pte_at(mm,addr,ptep,pteval) do { \
+	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
+ } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
+#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_special(pte)	(0)
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 2)
+#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)
+
+#define PTE_FILE_MAX_BITS	30
+
+#define PTE_BIT_FUNC(fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
+PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
+PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
+PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
+PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
+PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
+#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define pmd_present(pmd)	(pmd_val(pmd))
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		pmdp[0] = __pmd(0);	\
+		pmdp[1] = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+	unsigned long ptr;
+
+	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
+	ptr += PTRS_PER_PTE * sizeof(void *);
+
+	return __va(ptr);
+}
+
+#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
+
+/*
+ * Permanent address of a page. We never have highmem, so this is trivial.
+ */
+#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_present(pgd)	(1)
+#define pgd_clear(pgdp)		do { } while (0)
+#define set_pgd(pgd,pgdp)	do { } while (0)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr)	((pmd_t *)(dir))
+
+/* Find an entry in the third-level page table.. */
+#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+	return pte;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* Encode and decode a swap entry.
+ *
+ * We support up to 32GB of swap on 4k machines
+ */
+#define __swp_type(x)		(((x).val >> 2) & 0x7f)
+#define __swp_offset(x)		((x).val >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+/* FIXME: this is not correct */
+#define kern_addr_valid(addr)	(1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+		remap_pfn_range(vma, from, pfn, size, prot)
+
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASMARM_PGTABLE_H */
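The arithmetic behind the comments above can be checked in isolation: the swap encoding keeps a 7-bit type in bits 2-8 and the page offset from bit 9 upward, so with 32-bit entries there are 23 offset bits, i.e. 2^23 4KB pages = the 32GB quoted earlier, and pgd_index()/__pte_index() simply slice a virtual address at PGDIR_SHIFT and PAGE_SHIFT. A small standalone check (illustrative, not kernel code):

#include <stdio.h>

/* Mirrors the shifts used above: 2MB "pgd" entries, 4KB pages,
 * 512 Linux PTEs per table.
 */
#define PGDIR_SHIFT	21
#define PAGE_SHIFT	12
#define PTRS_PER_PTE	512

int main(void)
{
	unsigned long addr = 0x12345678;
	unsigned long pgd = addr >> PGDIR_SHIFT;
	unsigned long pte = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	/* swap offset width: 32 bits minus the 9 low bits used for type/flags */
	unsigned long long max_swap = (1ULL << (32 - 9)) << PAGE_SHIFT;

	printf("pgd index %lu, pte index %lu\n", pgd, pte);
	printf("max swap = %llu bytes (32GB)\n", max_swap);
	return 0;
}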
diff --git a/arch/arm/include/asm/poll.h b/arch/arm/include/asm/poll.h
new file mode 100644
index 0000000..c98509d
--- /dev/null
+++ b/arch/arm/include/asm/poll.h
@@ -0,0 +1 @@
+#include <asm-generic/poll.h>
diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h
new file mode 100644
index 0000000..2446d23
--- /dev/null
+++ b/arch/arm/include/asm/posix_types.h
@@ -0,0 +1,77 @@
+/*
+ *  arch/arm/include/asm/posix_types.h
+ *
+ *  Copyright (C) 1996-1998 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Changelog:
+ *   27-06-1996	RMK	Created
+ */
+#ifndef __ARCH_ARM_POSIX_TYPES_H
+#define __ARCH_ARM_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long		__kernel_ino_t;
+typedef unsigned short		__kernel_mode_t;
+typedef unsigned short		__kernel_nlink_t;
+typedef long			__kernel_off_t;
+typedef int			__kernel_pid_t;
+typedef unsigned short		__kernel_ipc_pid_t;
+typedef unsigned short		__kernel_uid_t;
+typedef unsigned short		__kernel_gid_t;
+typedef unsigned int		__kernel_size_t;
+typedef int			__kernel_ssize_t;
+typedef int			__kernel_ptrdiff_t;
+typedef long			__kernel_time_t;
+typedef long			__kernel_suseconds_t;
+typedef long			__kernel_clock_t;
+typedef int			__kernel_timer_t;
+typedef int			__kernel_clockid_t;
+typedef int			__kernel_daddr_t;
+typedef char *			__kernel_caddr_t;
+typedef unsigned short		__kernel_uid16_t;
+typedef unsigned short		__kernel_gid16_t;
+typedef unsigned int		__kernel_uid32_t;
+typedef unsigned int		__kernel_gid32_t;
+
+typedef unsigned short		__kernel_old_uid_t;
+typedef unsigned short		__kernel_old_gid_t;
+typedef unsigned short		__kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long		__kernel_loff_t;
+#endif
+
+typedef struct {
+	int	val[2];
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__)
+
+#undef	__FD_SET
+#define __FD_SET(fd, fdsetp) \
+		(((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31)))
+
+#undef	__FD_CLR
+#define __FD_CLR(fd, fdsetp) \
+		(((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31)))
+
+#undef	__FD_ISSET
+#define __FD_ISSET(fd, fdsetp) \
+		((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0)
+
+#undef	__FD_ZERO
+#define __FD_ZERO(fdsetp) \
+		(memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp))))
+
+#endif
+
+#endif
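The __FD_SET family above packs one bit per descriptor into 32-bit words: fd >> 5 selects the word and fd & 31 selects the bit within it. A trivial standalone check of that arithmetic:

#include <stdio.h>

/* Same arithmetic as the __FD_SET family above: 32 bits per word. */
int main(void)
{
	int fd = 35;

	printf("fd %d -> word %d, bit %d\n", fd, fd >> 5, fd & 31);
	return 0;	/* prints: fd 35 -> word 1, bit 3 */
}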
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
new file mode 100644
index 0000000..db80203
--- /dev/null
+++ b/arch/arm/include/asm/proc-fns.h
@@ -0,0 +1,241 @@
+/*
+ *  arch/arm/include/asm/proc-fns.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PROCFNS_H
+#define __ASM_PROCFNS_H
+
+#ifdef __KERNEL__
+
+
+/*
+ * Work out if we need multiple CPU support
+ */
+#undef MULTI_CPU
+#undef CPU_NAME
+
+/*
+ * CPU_NAME - the prefix for CPU related functions
+ */
+
+#ifdef CONFIG_CPU_32
+# ifdef CONFIG_CPU_ARM610
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm6
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM7TDMI
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm7tdmi
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM710
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm7
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM720T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm720
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM740T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm740
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM9TDMI
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm9tdmi
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM920T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm920
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM922T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm922
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM925T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm925
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM926T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm926
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM940T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm940
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM946E
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm946
+#  endif
+# endif
+# ifdef CONFIG_CPU_SA110
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_sa110
+#  endif
+# endif
+# ifdef CONFIG_CPU_SA1100
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_sa1100
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM1020
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm1020
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM1020E
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm1020e
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM1022
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm1022
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM1026
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm1026
+#  endif
+# endif
+# ifdef CONFIG_CPU_XSCALE
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_xscale
+#  endif
+# endif
+# ifdef CONFIG_CPU_XSC3
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_xsc3
+#  endif
+# endif
+# ifdef CONFIG_CPU_FEROCEON
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_feroceon
+#  endif
+# endif
+# ifdef CONFIG_CPU_V6
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_v6
+#  endif
+# endif
+# ifdef CONFIG_CPU_V7
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_v7
+#  endif
+# endif
+#endif
+
+#ifndef __ASSEMBLY__
+
+#ifndef MULTI_CPU
+#include <asm/cpu-single.h>
+#else
+#include <asm/cpu-multi32.h>
+#endif
+
+#include <asm/memory.h>
+
+#ifdef CONFIG_MMU
+
+#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+
+#define cpu_get_pgd()	\
+	({						\
+		unsigned long pg;			\
+		__asm__("mrc	p15, 0, %0, c2, c0, 0"	\
+			 : "=r" (pg) : : "cc");		\
+		pg &= ~0x3fff;				\
+		(pgd_t *)phys_to_virt(pg);		\
+	})
+
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_PROCFNS_H */
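The CPU_NAME prefix chosen above is not used directly in C; in the single-CPU case it is pasted onto the processor function names by asm/cpu-single.h, which is not part of this diff. A rough sketch of that token-pasting idea, with the helper macro names invented here purely for illustration:

/* Sketch only - the real glue lives in asm/cpu-single.h.  With
 * CONFIG_CPU_ARM926T and no other CPU selected, CPU_NAME expands to
 * cpu_arm926, so a wrapper such as cpu_do_idle() can be pasted
 * together like this:
 */
#define __cpu_fn(name, x)	name##x
#define _cpu_fn(name, x)	__cpu_fn(name, x)
#define cpu_do_idle()		_cpu_fn(CPU_NAME, _do_idle)()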
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
new file mode 100644
index 0000000..b01d5e7
--- /dev/null
+++ b/arch/arm/include/asm/processor.h
@@ -0,0 +1,131 @@
+/*
+ *  arch/arm/include/asm/processor.h
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_PROCESSOR_H
+#define __ASM_ARM_PROCESSOR_H
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#define STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
+			 TASK_SIZE : TASK_SIZE_26)
+#define STACK_TOP_MAX	TASK_SIZE
+#endif
+
+union debug_insn {
+	u32	arm;
+	u16	thumb;
+};
+
+struct debug_entry {
+	u32			address;
+	union debug_insn	insn;
+};
+
+struct debug_info {
+	int			nsaved;
+	struct debug_entry	bp[2];
+};
+
+struct thread_struct {
+							/* fault info	  */
+	unsigned long		address;
+	unsigned long		trap_no;
+	unsigned long		error_code;
+							/* debugging	  */
+	struct debug_info	debug;
+};
+
+#define INIT_THREAD  {	}
+
+#ifdef CONFIG_MMU
+#define nommu_start_thread(regs) do { } while (0)
+#else
+#define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
+#endif
+
+#define start_thread(regs,pc,sp)					\
+({									\
+	unsigned long *stack = (unsigned long *)sp;			\
+	set_fs(USER_DS);						\
+	memzero(regs->uregs, sizeof(regs->uregs));			\
+	if (current->personality & ADDR_LIMIT_32BIT)			\
+		regs->ARM_cpsr = USR_MODE;				\
+	else								\
+		regs->ARM_cpsr = USR26_MODE;				\
+	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
+		regs->ARM_cpsr |= PSR_T_BIT;				\
+	regs->ARM_pc = pc & ~1;		/* pc */			\
+	regs->ARM_sp = sp;		/* sp */			\
+	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\
+	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\
+	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\
+	nommu_start_thread(regs);					\
+})
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)	do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax()			barrier()
+
+/*
+ * Create a new kernel thread
+ */
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+#define task_pt_regs(p) \
+	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
+
+/*
+ * Prefetching support - ARMv5 and later only.
+ */
+#if __LINUX_ARM_ARCH__ >= 5
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *ptr)
+{
+	__asm__ __volatile__(
+		"pld\t%0"
+		:
+		: "o" (*(char *)ptr)
+		: "cc");
+}
+
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(ptr)	prefetch(ptr)
+
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define spin_lock_prefetch(x) do { } while (0)
+
+#endif
+
+#endif
+
+#endif /* __ASM_ARM_PROCESSOR_H */
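task_pt_regs() above finds the saved user registers just below THREAD_START_SP bytes into the task's stack allocation. A standalone sketch of that arithmetic (THREAD_START_SP comes from asm/thread_info.h; the 8KB stack and the sample stack address are assumed here for illustration):

#include <stdio.h>

struct pt_regs { long uregs[18]; };	/* matches asm/ptrace.h above;
					 * 72 bytes on 32-bit ARM */

int main(void)
{
	/* Assumed values: THREAD_SIZE = 8KB, THREAD_START_SP = 8KB - 8 */
	unsigned long stack_page = 0xc1000000UL;
	unsigned long thread_start_sp = 8192 - 8;
	unsigned long regs = stack_page + thread_start_sp
			     - sizeof(struct pt_regs);

	printf("pt_regs at %#lx (%zu bytes)\n", regs, sizeof(struct pt_regs));
	return 0;
}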
diff --git a/arch/arm/include/asm/procinfo.h b/arch/arm/include/asm/procinfo.h
new file mode 100644
index 0000000..ca52e58
--- /dev/null
+++ b/arch/arm/include/asm/procinfo.h
@@ -0,0 +1,49 @@
+/*
+ *  arch/arm/include/asm/procinfo.h
+ *
+ *  Copyright (C) 1996-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PROCINFO_H
+#define __ASM_PROCINFO_H
+
+#ifdef __KERNEL__
+
+struct cpu_tlb_fns;
+struct cpu_user_fns;
+struct cpu_cache_fns;
+struct processor;
+
+/*
+ * Note!  struct processor is always defined if we're
+ * using MULTI_CPU, otherwise this entry is unused,
+ * but still exists.
+ *
+ * NOTE! The following structure is defined by assembly
+ * language, NOT C code.  For more information, check:
+ *  arch/arm/mm/proc-*.S and arch/arm/kernel/head.S
+ */
+struct proc_info_list {
+	unsigned int		cpu_val;
+	unsigned int		cpu_mask;
+	unsigned long		__cpu_mm_mmu_flags;	/* used by head.S */
+	unsigned long		__cpu_io_mmu_flags;	/* used by head.S */
+	unsigned long		__cpu_flush;		/* used by head.S */
+	const char		*arch_name;
+	const char		*elf_name;
+	unsigned int		elf_hwcap;
+	const char		*cpu_name;
+	struct processor	*proc;
+	struct cpu_tlb_fns	*tlb;
+	struct cpu_user_fns	*user;
+	struct cpu_cache_fns	*cache;
+};
+
+#else	/* __KERNEL__ */
+#include <asm/elf.h>
+#warning "Please include asm/elf.h instead"
+#endif	/* __KERNEL__ */
+#endif
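Each proc_info_list entry is selected by masking the CPU ID register against cpu_mask and comparing with cpu_val; the real lookup is done in assembly during early boot. A C sketch of the same matching rule, for illustration only:

/* Sketch of the boot-time matching; the kernel does this in assembly. */
struct proc_info_list *lookup_processor(unsigned int cpuid,
					struct proc_info_list *p,
					struct proc_info_list *end)
{
	for (; p < end; p++)
		if ((cpuid & p->cpu_mask) == p->cpu_val)
			return p;
	return NULL;
}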
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
new file mode 100644
index 0000000..b415c0e
--- /dev/null
+++ b/arch/arm/include/asm/ptrace.h
@@ -0,0 +1,162 @@
+/*
+ *  arch/arm/include/asm/ptrace.h
+ *
+ *  Copyright (C) 1996-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_PTRACE_H
+#define __ASM_ARM_PTRACE_H
+
+#include <asm/hwcap.h>
+
+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+#define PTRACE_GETFPREGS	14
+#define PTRACE_SETFPREGS	15
+/* PTRACE_ATTACH is 16 */
+/* PTRACE_DETACH is 17 */
+#define PTRACE_GETWMMXREGS	18
+#define PTRACE_SETWMMXREGS	19
+/* 20 is unused */
+#define PTRACE_OLDSETOPTIONS	21
+#define PTRACE_GET_THREAD_AREA	22
+#define PTRACE_SET_SYSCALL	23
+/* PTRACE_SYSCALL is 24 */
+#define PTRACE_GETCRUNCHREGS	25
+#define PTRACE_SETCRUNCHREGS	26
+
+/*
+ * PSR bits
+ */
+#define USR26_MODE	0x00000000
+#define FIQ26_MODE	0x00000001
+#define IRQ26_MODE	0x00000002
+#define SVC26_MODE	0x00000003
+#define USR_MODE	0x00000010
+#define FIQ_MODE	0x00000011
+#define IRQ_MODE	0x00000012
+#define SVC_MODE	0x00000013
+#define ABT_MODE	0x00000017
+#define UND_MODE	0x0000001b
+#define SYSTEM_MODE	0x0000001f
+#define MODE32_BIT	0x00000010
+#define MODE_MASK	0x0000001f
+#define PSR_T_BIT	0x00000020
+#define PSR_F_BIT	0x00000040
+#define PSR_I_BIT	0x00000080
+#define PSR_A_BIT	0x00000100
+#define PSR_J_BIT	0x01000000
+#define PSR_Q_BIT	0x08000000
+#define PSR_V_BIT	0x10000000
+#define PSR_C_BIT	0x20000000
+#define PSR_Z_BIT	0x40000000
+#define PSR_N_BIT	0x80000000
+#define PCMASK		0
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f		0xff000000	/* Flags		*/
+#define PSR_s		0x00ff0000	/* Status		*/
+#define PSR_x		0x0000ff00	/* Extension		*/
+#define PSR_c		0x000000ff	/* Control		*/
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call.  Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+	long uregs[18];
+};
+
+#define ARM_cpsr	uregs[16]
+#define ARM_pc		uregs[15]
+#define ARM_lr		uregs[14]
+#define ARM_sp		uregs[13]
+#define ARM_ip		uregs[12]
+#define ARM_fp		uregs[11]
+#define ARM_r10		uregs[10]
+#define ARM_r9		uregs[9]
+#define ARM_r8		uregs[8]
+#define ARM_r7		uregs[7]
+#define ARM_r6		uregs[6]
+#define ARM_r5		uregs[5]
+#define ARM_r4		uregs[4]
+#define ARM_r3		uregs[3]
+#define ARM_r2		uregs[2]
+#define ARM_r1		uregs[1]
+#define ARM_r0		uregs[0]
+#define ARM_ORIG_r0	uregs[17]
+
+#ifdef __KERNEL__
+
+#define user_mode(regs)	\
+	(((regs)->ARM_cpsr & 0xf) == 0)
+
+#ifdef CONFIG_ARM_THUMB
+#define thumb_mode(regs) \
+	(((regs)->ARM_cpsr & PSR_T_BIT))
+#else
+#define thumb_mode(regs) (0)
+#endif
+
+#define isa_mode(regs) \
+	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
+	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+
+#define processor_mode(regs) \
+	((regs)->ARM_cpsr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+	(!((regs)->ARM_cpsr & PSR_I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+	(!((regs)->ARM_cpsr & PSR_F_BIT))
+
+/* Are the current registers suitable for user mode?
+ * (used to maintain security in signal handlers)
+ */
+static inline int valid_user_regs(struct pt_regs *regs)
+{
+	if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+		regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+		return 1;
+	}
+
+	/*
+	 * Force CPSR to something logical...
+	 */
+	regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
+	if (!(elf_hwcap & HWCAP_26BIT))
+		regs->ARM_cpsr |= USR_MODE;
+
+	return 0;
+}
+
+#define pc_pointer(v) \
+	((v) & ~PCMASK)
+
+#define instruction_pointer(regs) \
+	(pc_pointer((regs)->ARM_pc))
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+#define predicate(x)		((x) & 0xf0000000)
+#define PREDICATE_ALWAYS	0xe0000000
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
+
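isa_mode() above folds the J and T bits of the CPSR into a small number (0 = ARM, 1 = Thumb, 2 = Jazelle), and user_mode() just checks that the low mode bits are zero. A standalone check of that bit arithmetic:

#include <stdio.h>

#define PSR_T_BIT	0x00000020
#define PSR_J_BIT	0x01000000
#define USR_MODE	0x00000010

/* Same bit arithmetic as isa_mode()/user_mode() above, on a raw CPSR. */
int main(void)
{
	unsigned long cpsr = USR_MODE | PSR_T_BIT;	/* user Thumb state */
	unsigned long isa = ((cpsr & PSR_J_BIT) >> 23) |
			    ((cpsr & PSR_T_BIT) >> 5);

	printf("isa=%lu (0=ARM 1=Thumb 2=Jazelle), user=%d\n",
	       isa, (cpsr & 0xf) == 0);
	return 0;
}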
diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h
new file mode 100644
index 0000000..734b581
--- /dev/null
+++ b/arch/arm/include/asm/resource.h
@@ -0,0 +1,6 @@
+#ifndef _ARM_RESOURCE_H
+#define _ARM_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
new file mode 100644
index 0000000..ca0a37d
--- /dev/null
+++ b/arch/arm/include/asm/scatterlist.h
@@ -0,0 +1,27 @@
+#ifndef _ASMARM_SCATTERLIST_H
+#define _ASMARM_SCATTERLIST_H
+
+#include <asm/memory.h>
+#include <asm/types.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+	unsigned long	sg_magic;
+#endif
+	unsigned long	page_link;
+	unsigned int	offset;		/* buffer offset		 */
+	dma_addr_t	dma_address;	/* dma address			 */
+	unsigned int	length;		/* length			 */
+};
+
+/*
+ * These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg)      ((sg)->dma_address)
+#define sg_dma_len(sg)          ((sg)->length)
+
+#endif /* _ASMARM_SCATTERLIST_H */
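A usage sketch for the two accessors above, following the comment: walk only the entries pci_map_sg()/dma_map_sg() returned and stop at the first zero length. hw_queue_buffer() is a made-up device callback, purely for illustration:

/* Sketch: consuming mapped entries as the comment above describes.
 * "nents" is the count returned by pci_map_sg()/dma_map_sg();
 * hw_queue_buffer() stands in for whatever programs the device.
 */
static void program_sg(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents && sg_dma_len(&sg[i]); i++)
		hw_queue_buffer(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
}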
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
new file mode 100644
index 0000000..2b8c516
--- /dev/null
+++ b/arch/arm/include/asm/sections.h
@@ -0,0 +1 @@
+#include <asm-generic/sections.h>
diff --git a/arch/arm/include/asm/segment.h b/arch/arm/include/asm/segment.h
new file mode 100644
index 0000000..9e24c21
--- /dev/null
+++ b/arch/arm/include/asm/segment.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_ARM_SEGMENT_H
+#define __ASM_ARM_SEGMENT_H
+
+#define __KERNEL_CS   0x0
+#define __KERNEL_DS   0x0
+
+#define __USER_CS     0x1
+#define __USER_DS     0x1
+
+#endif /* __ASM_ARM_SEGMENT_H */
+
diff --git a/arch/arm/include/asm/sembuf.h b/arch/arm/include/asm/sembuf.h
new file mode 100644
index 0000000..1c02839
--- /dev/null
+++ b/arch/arm/include/asm/sembuf.h
@@ -0,0 +1,25 @@
+#ifndef _ASMARM_SEMBUF_H
+#define _ASMARM_SEMBUF_H
+
+/* 
+ * The semid64_ds structure for arm architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	unsigned long	__unused1;
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	__unused2;
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ASMARM_SEMBUF_H */
diff --git a/arch/arm/include/asm/serial.h b/arch/arm/include/asm/serial.h
new file mode 100644
index 0000000..ebb0490
--- /dev/null
+++ b/arch/arm/include/asm/serial.h
@@ -0,0 +1,19 @@
+/*
+ *  arch/arm/include/asm/serial.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Changelog:
+ *   15-10-1996	RMK	Created
+ */
+
+#ifndef __ASM_SERIAL_H
+#define __ASM_SERIAL_H
+
+#define BASE_BAUD	(1843200 / 16)
+
+#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
new file mode 100644
index 0000000..7bbf105
--- /dev/null
+++ b/arch/arm/include/asm/setup.h
@@ -0,0 +1,226 @@
+/*
+ *  arch/arm/include/asm/setup.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Structure passed to kernel to tell it about the
+ *  hardware it's running on.  See Documentation/arm/Setup
+ *  for more info.
+ */
+#ifndef __ASMARM_SETUP_H
+#define __ASMARM_SETUP_H
+
+#include <asm/types.h>
+
+#define COMMAND_LINE_SIZE 1024
+
+/* The list ends with an ATAG_NONE node. */
+#define ATAG_NONE	0x00000000
+
+struct tag_header {
+	__u32 size;
+	__u32 tag;
+};
+
+/* The list must start with an ATAG_CORE node */
+#define ATAG_CORE	0x54410001
+
+struct tag_core {
+	__u32 flags;		/* bit 0 = read-only */
+	__u32 pagesize;
+	__u32 rootdev;
+};
+
+/* it is allowed to have multiple ATAG_MEM nodes */
+#define ATAG_MEM	0x54410002
+
+struct tag_mem32 {
+	__u32	size;
+	__u32	start;	/* physical start address */
+};
+
+/* VGA text type displays */
+#define ATAG_VIDEOTEXT	0x54410003
+
+struct tag_videotext {
+	__u8		x;
+	__u8		y;
+	__u16		video_page;
+	__u8		video_mode;
+	__u8		video_cols;
+	__u16		video_ega_bx;
+	__u8		video_lines;
+	__u8		video_isvga;
+	__u16		video_points;
+};
+
+/* describes how the ramdisk will be used by the kernel */
+#define ATAG_RAMDISK	0x54410004
+
+struct tag_ramdisk {
+	__u32 flags;	/* bit 0 = load, bit 1 = prompt */
+	__u32 size;	/* decompressed ramdisk size in _kilo_ bytes */
+	__u32 start;	/* starting block of floppy-based RAM disk image */
+};
+
+/* describes where the compressed ramdisk image lives (virtual address) */
+/*
+ * this one accidentally used virtual addresses - as such,
+ * it's deprecated.
+ */
+#define ATAG_INITRD	0x54410005
+
+/* describes where the compressed ramdisk image lives (physical address) */
+#define ATAG_INITRD2	0x54420005
+
+struct tag_initrd {
+	__u32 start;	/* physical start address */
+	__u32 size;	/* size of compressed ramdisk image in bytes */
+};
+
+/* board serial number. "64 bits should be enough for everybody" */
+#define ATAG_SERIAL	0x54410006
+
+struct tag_serialnr {
+	__u32 low;
+	__u32 high;
+};
+
+/* board revision */
+#define ATAG_REVISION	0x54410007
+
+struct tag_revision {
+	__u32 rev;
+};
+
+/* initial values for vesafb-type framebuffers. see struct screen_info
+ * in include/linux/tty.h
+ */
+#define ATAG_VIDEOLFB	0x54410008
+
+struct tag_videolfb {
+	__u16		lfb_width;
+	__u16		lfb_height;
+	__u16		lfb_depth;
+	__u16		lfb_linelength;
+	__u32		lfb_base;
+	__u32		lfb_size;
+	__u8		red_size;
+	__u8		red_pos;
+	__u8		green_size;
+	__u8		green_pos;
+	__u8		blue_size;
+	__u8		blue_pos;
+	__u8		rsvd_size;
+	__u8		rsvd_pos;
+};
+
+/* command line: \0 terminated string */
+#define ATAG_CMDLINE	0x54410009
+
+struct tag_cmdline {
+	char	cmdline[1];	/* this is the minimum size */
+};
+
+/* acorn RiscPC specific information */
+#define ATAG_ACORN	0x41000101
+
+struct tag_acorn {
+	__u32 memc_control_reg;
+	__u32 vram_pages;
+	__u8 sounddefault;
+	__u8 adfsdrives;
+};
+
+/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
+#define ATAG_MEMCLK	0x41000402
+
+struct tag_memclk {
+	__u32 fmemclk;
+};
+
+struct tag {
+	struct tag_header hdr;
+	union {
+		struct tag_core		core;
+		struct tag_mem32	mem;
+		struct tag_videotext	videotext;
+		struct tag_ramdisk	ramdisk;
+		struct tag_initrd	initrd;
+		struct tag_serialnr	serialnr;
+		struct tag_revision	revision;
+		struct tag_videolfb	videolfb;
+		struct tag_cmdline	cmdline;
+
+		/*
+		 * Acorn specific
+		 */
+		struct tag_acorn	acorn;
+
+		/*
+		 * DC21285 specific
+		 */
+		struct tag_memclk	memclk;
+	} u;
+};
+
+struct tagtable {
+	__u32 tag;
+	int (*parse)(const struct tag *);
+};
+
+#define tag_member_present(tag,member)				\
+	((unsigned long)(&((struct tag *)0L)->member + 1)	\
+		<= (tag)->hdr.size * 4)
+
+#define tag_next(t)	((struct tag *)((__u32 *)(t) + (t)->hdr.size))
+#define tag_size(type)	((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
+
+#define for_each_tag(t,base)		\
+	for (t = base; t->hdr.size; t = tag_next(t))
+
+#ifdef __KERNEL__
+
+#define __tag __used __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
+/*
+ * Memory map description
+ */
+#ifdef CONFIG_ARCH_LH7A40X
+# define NR_BANKS 16
+#else
+# define NR_BANKS 8
+#endif
+
+struct membank {
+	unsigned long start;
+	unsigned long size;
+	int           node;
+};
+
+struct meminfo {
+	int nr_banks;
+	struct membank bank[NR_BANKS];
+};
+
+/*
+ * Early command line parameters.
+ */
+struct early_params {
+	const char *arg;
+	void (*fn)(char **p);
+};
+
+#define __early_param(name,fn)					\
+static struct early_params __early_##fn __used			\
+__attribute__((__section__(".early_param.init"))) = { name, fn }
+
+#endif  /*  __KERNEL__  */
+
+#endif
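tag_next() and for_each_tag() walk the boot loader's tag list by advancing hdr.size words at a time until the terminating ATAG_NONE tag (which has size 0). A sketch of how ATAG_MEM entries might be collected into the meminfo structure above; this is an illustration, not the kernel's actual parser:

/* Sketch: gather memory banks from a boot tag list handed over in r2. */
static void parse_tags(struct tag *base, struct meminfo *mi)
{
	struct tag *t;

	for_each_tag(t, base) {
		if (t->hdr.tag == ATAG_MEM && mi->nr_banks < NR_BANKS) {
			mi->bank[mi->nr_banks].start = t->u.mem.start;
			mi->bank[mi->nr_banks].size  = t->u.mem.size;
			mi->bank[mi->nr_banks].node  = 0;
			mi->nr_banks++;
		}
	}
}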
diff --git a/arch/arm/include/asm/shmbuf.h b/arch/arm/include/asm/shmbuf.h
new file mode 100644
index 0000000..2e5c67b
--- /dev/null
+++ b/arch/arm/include/asm/shmbuf.h
@@ -0,0 +1,42 @@
+#ifndef _ASMARM_SHMBUF_H
+#define _ASMARM_SHMBUF_H
+
+/* 
+ * The shmid64_ds structure for arm architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	unsigned long		__unused1;
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	unsigned long		__unused2;
+	__kernel_time_t		shm_ctime;	/* last change time */
+	unsigned long		__unused3;
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused4;
+	unsigned long		__unused5;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ASMARM_SHMBUF_H */
diff --git a/arch/arm/include/asm/shmparam.h b/arch/arm/include/asm/shmparam.h
new file mode 100644
index 0000000..a5223b3
--- /dev/null
+++ b/arch/arm/include/asm/shmparam.h
@@ -0,0 +1,16 @@
+#ifndef _ASMARM_SHMPARAM_H
+#define _ASMARM_SHMPARAM_H
+
+/*
+ * This should be the size of the virtually indexed cache/ways,
+ * or page size, whichever is greater since the cache aliases
+ * every size/ways bytes.
+ */
+#define	SHMLBA	(4 * PAGE_SIZE)		 /* attach addr a multiple of this */
+
+/*
+ * Enforce SHMLBA in shmat
+ */
+#define __ARCH_FORCE_SHMLBA
+
+#endif /* _ASMARM_SHMPARAM_H */
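As a worked example of the comment above: a 32KB, 2-way virtually indexed cache (geometry assumed here for illustration) aliases every 32KB / 2 = 16KB, i.e. four 4KB pages, which is exactly the 4 * PAGE_SIZE chosen for SHMLBA:

#include <stdio.h>

int main(void)
{
	unsigned int cache = 32 * 1024, ways = 2, page = 4096;

	/* aliasing granule = cache size / ways; compare with 4 * PAGE_SIZE */
	printf("alias every %u bytes = %u pages\n",
	       cache / ways, cache / ways / page);
	return 0;
}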
diff --git a/arch/arm/include/asm/sigcontext.h b/arch/arm/include/asm/sigcontext.h
new file mode 100644
index 0000000..fc0b80b
--- /dev/null
+++ b/arch/arm/include/asm/sigcontext.h
@@ -0,0 +1,34 @@
+#ifndef _ASMARM_SIGCONTEXT_H
+#define _ASMARM_SIGCONTEXT_H
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.  Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+	unsigned long trap_no;
+	unsigned long error_code;
+	unsigned long oldmask;
+	unsigned long arm_r0;
+	unsigned long arm_r1;
+	unsigned long arm_r2;
+	unsigned long arm_r3;
+	unsigned long arm_r4;
+	unsigned long arm_r5;
+	unsigned long arm_r6;
+	unsigned long arm_r7;
+	unsigned long arm_r8;
+	unsigned long arm_r9;
+	unsigned long arm_r10;
+	unsigned long arm_fp;
+	unsigned long arm_ip;
+	unsigned long arm_sp;
+	unsigned long arm_lr;
+	unsigned long arm_pc;
+	unsigned long arm_cpsr;
+	unsigned long fault_address;
+};
+
+
+#endif
diff --git a/arch/arm/include/asm/siginfo.h b/arch/arm/include/asm/siginfo.h
new file mode 100644
index 0000000..5e21852
--- /dev/null
+++ b/arch/arm/include/asm/siginfo.h
@@ -0,0 +1,6 @@
+#ifndef _ASMARM_SIGINFO_H
+#define _ASMARM_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
new file mode 100644
index 0000000..d0fb487
--- /dev/null
+++ b/arch/arm/include/asm/signal.h
@@ -0,0 +1,164 @@
+#ifndef _ASMARM_SIGNAL_H
+#define _ASMARM_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+#ifdef __KERNEL__
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+#define _NSIG		64
+#define _NSIG_BPW	32
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+#define NSIG		32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGBUS		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGUSR1		10
+#define SIGSEGV		11
+#define SIGUSR2		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGSTKFLT	16
+#define SIGCHLD		17
+#define SIGCONT		18
+#define SIGSTOP		19
+#define SIGTSTP		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGURG		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGIO		29
+#define SIGPOLL		SIGIO
+/*
+#define SIGLOST		29
+*/
+#define SIGPWR		30
+#define SIGSYS		31
+#define	SIGUNUSED	31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+#define SIGSWI		32
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_NOCLDSTOP		flag to turn off SIGCHLD when children stop.
+ * SA_NOCLDWAIT		flag on SIGCHLD to inhibit zombies.
+ * SA_SIGINFO		deliver the signal with SIGINFO structs
+ * SA_THIRTYTWO		delivers the signal in 32-bit mode, even if the task 
+ *			is running in 26-bit.
+ * SA_ONSTACK		allows alternate signal stacks (see sigaltstack(2)).
+ * SA_RESTART		flag to get restarting signals (which were the default long ago)
+ * SA_NODEFER		prevents the current signal from being masked in the handler.
+ * SA_RESETHAND		clears the handler when the signal is delivered.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP	0x00000001
+#define SA_NOCLDWAIT	0x00000002
+#define SA_SIGINFO	0x00000004
+#define SA_THIRTYTWO	0x02000000
+#define SA_RESTORER	0x04000000
+#define SA_ONSTACK	0x08000000
+#define SA_RESTART	0x10000000
+#define SA_NODEFER	0x40000000
+#define SA_RESETHAND	0x80000000
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+
+
+/* 
+ * sigaltstack controls
+ */
+#define SS_ONSTACK	1
+#define SS_DISABLE	2
+
+#define MINSIGSTKSZ	2048
+#define SIGSTKSZ	8192
+
+#include <asm-generic/signal.h>
+
+#ifdef __KERNEL__
+struct old_sigaction {
+	__sighandler_t sa_handler;
+	old_sigset_t sa_mask;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+};
+
+struct sigaction {
+	__sighandler_t sa_handler;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+	sigset_t sa_mask;		/* mask last for extensibility */
+};
+
+struct k_sigaction {
+	struct sigaction sa;
+};
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+struct sigaction {
+	union {
+	  __sighandler_t _sa_handler;
+	  void (*_sa_sigaction)(int, struct siginfo *, void *);
+	} _u;
+	sigset_t sa_mask;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+};
+
+#define sa_handler	_u._sa_handler
+#define sa_sigaction	_u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
new file mode 100644
index 0000000..503843d
--- /dev/null
+++ b/arch/arm/include/asm/sizes.h
@@ -0,0 +1,56 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/* DO NOT EDIT!! - this file automatically generated
+ *                 from .s file by awk -f s2h.awk
+ */
+/*  Size definitions
+ *  Copyright (C) ARM Limited 1998. All rights reserved.
+ */
+
+#ifndef __sizes_h
+#define __sizes_h                       1
+
+/* handy sizes */
+#define SZ_16				0x00000010
+#define SZ_256				0x00000100
+#define SZ_512				0x00000200
+
+#define SZ_1K                           0x00000400
+#define SZ_4K                           0x00001000
+#define SZ_8K                           0x00002000
+#define SZ_16K                          0x00004000
+#define SZ_64K                          0x00010000
+#define SZ_128K                         0x00020000
+#define SZ_256K                         0x00040000
+#define SZ_512K                         0x00080000
+
+#define SZ_1M                           0x00100000
+#define SZ_2M                           0x00200000
+#define SZ_4M                           0x00400000
+#define SZ_8M                           0x00800000
+#define SZ_16M                          0x01000000
+#define SZ_32M                          0x02000000
+#define SZ_64M                          0x04000000
+#define SZ_128M                         0x08000000
+#define SZ_256M                         0x10000000
+#define SZ_512M                         0x20000000
+
+#define SZ_1G                           0x40000000
+#define SZ_2G                           0x80000000
+
+#endif
+
+/*         END */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
new file mode 100644
index 0000000..cc12a52
--- /dev/null
+++ b/arch/arm/include/asm/smp.h
@@ -0,0 +1,147 @@
+/*
+ *  arch/arm/include/asm/smp.h
+ *
+ *  Copyright (C) 2004-2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_SMP_H
+#define __ASM_ARM_SMP_H
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/thread_info.h>
+
+#include <asm/arch/smp.h>
+
+#ifndef CONFIG_SMP
+# error "<asm/smp.h> included in non-SMP build"
+#endif
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/*
+ * at the moment, there's not a big penalty for changing CPUs
+ * (the >big< penalty is running SMP in the first place)
+ */
+#define PROC_CHANGE_PENALTY		15
+
+struct seq_file;
+
+/*
+ * generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p);
+
+/*
+ * Called from assembly code, this handles an IPI.
+ */
+asmlinkage void do_IPI(struct pt_regs *regs);
+
+/*
+ * Set up the SMP cpu_possible_map
+ */
+extern void smp_init_cpus(void);
+
+/*
+ * Move global data into per-processor storage.
+ */
+extern void smp_store_cpu_info(unsigned int cpuid);
+
+/*
+ * Raise an IPI cross call on CPUs in callmap.
+ */
+extern void smp_cross_call(cpumask_t callmap);
+
+/*
+ * Broadcast a timer interrupt to the other CPUs.
+ */
+extern void smp_send_timer(void);
+
+/*
+ * Broadcast a clock event to other CPUs.
+ */
+extern void smp_timer_broadcast(cpumask_t mask);
+
+/*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+extern int boot_secondary(unsigned int cpu, struct task_struct *);
+
+/*
+ * Called from platform specific assembly code, this is the
+ * secondary CPU entry point.
+ */
+asmlinkage void secondary_start_kernel(void);
+
+/*
+ * Perform platform specific initialisation of the specified CPU.
+ */
+extern void platform_secondary_init(unsigned int cpu);
+
+/*
+ * Initial data for bringing up a secondary CPU.
+ */
+struct secondary_data {
+	unsigned long pgdir;
+	void *stack;
+};
+extern struct secondary_data secondary_data;
+
+extern int __cpu_disable(void);
+extern int mach_cpu_disable(unsigned int cpu);
+
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
+
+extern void platform_cpu_die(unsigned int cpu);
+extern int platform_cpu_kill(unsigned int cpu);
+extern void platform_cpu_enable(unsigned int cpu);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
+/*
+ * Local timer interrupt handling function (can be IPI'ed).
+ */
+extern void local_timer_interrupt(void);
+
+#ifdef CONFIG_LOCAL_TIMERS
+
+/*
+ * Stop a local timer interrupt.
+ */
+extern void local_timer_stop(unsigned int cpu);
+
+/*
+ * Platform provides this to acknowledge a local timer IRQ
+ */
+extern int local_timer_ack(void);
+
+#else
+
+static inline void local_timer_stop(unsigned int cpu)
+{
+}
+
+#endif
+
+/*
+ * Set up a local timer interrupt for a CPU.
+ */
+extern void local_timer_setup(unsigned int cpu);
+
+/*
+ * show local interrupt info
+ */
+extern void show_local_irqs(struct seq_file *);
+
+/*
+ * Called from assembly, this is the local timer IRQ handler
+ */
+asmlinkage void do_local_timer(struct pt_regs *);
+
+#endif /* ifndef __ASM_ARM_SMP_H */
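One plausible way the local timer hooks above fit together (the real handler in arch/arm/kernel/smp.c does more bookkeeping; this is only a sketch): the IPI'ed handler asks the platform to acknowledge the interrupt and, if it really was the local timer, runs the common handler.

/* Sketch only - not the kernel's actual implementation. */
asmlinkage void do_local_timer(struct pt_regs *regs)
{
	if (local_timer_ack())		/* platform confirms it was ours */
		local_timer_interrupt();
}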
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
new file mode 100644
index 0000000..6817be9
--- /dev/null
+++ b/arch/arm/include/asm/socket.h
@@ -0,0 +1,57 @@
+#ifndef _ASMARM_SOCKET_H
+#define _ASMARM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET	1
+
+#define SO_DEBUG	1
+#define SO_REUSEADDR	2
+#define SO_TYPE		3
+#define SO_ERROR	4
+#define SO_DONTROUTE	5
+#define SO_BROADCAST	6
+#define SO_SNDBUF	7
+#define SO_RCVBUF	8
+#define SO_SNDBUFFORCE	32
+#define SO_RCVBUFFORCE	33
+#define SO_KEEPALIVE	9
+#define SO_OOBINLINE	10
+#define SO_NO_CHECK	11
+#define SO_PRIORITY	12
+#define SO_LINGER	13
+#define SO_BSDCOMPAT	14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_PASSCRED	16
+#define SO_PEERCRED	17
+#define SO_RCVLOWAT	18
+#define SO_SNDLOWAT	19
+#define SO_RCVTIMEO	20
+#define SO_SNDTIMEO	21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
+#define SO_SECURITY_ENCRYPTION_NETWORK		24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER        26
+#define SO_DETACH_FILTER        27
+
+#define SO_PEERNAME             28
+#define SO_TIMESTAMP		29
+#define SCM_TIMESTAMP		SO_TIMESTAMP
+
+#define SO_ACCEPTCONN		30
+
+#define SO_PEERSEC		31
+#define SO_PASSSEC		34
+#define SO_TIMESTAMPNS		35
+#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS
+
+#define SO_MARK			36
+
+#endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/sockios.h b/arch/arm/include/asm/sockios.h
new file mode 100644
index 0000000..a2588a2
--- /dev/null
+++ b/arch/arm/include/asm/sockios.h
@@ -0,0 +1,13 @@
+#ifndef __ARCH_ARM_SOCKIOS_H
+#define __ARCH_ARM_SOCKIOS_H
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 	0x8901
+#define SIOCSPGRP	0x8902
+#define FIOGETOWN	0x8903
+#define SIOCGPGRP	0x8904
+#define SIOCATMARK	0x8905
+#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */
+#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */
+
+#endif
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
new file mode 100644
index 0000000..2771581
--- /dev/null
+++ b/arch/arm/include/asm/sparsemem.h
@@ -0,0 +1,10 @@
+#ifndef ASMARM_SPARSEMEM_H
+#define ASMARM_SPARSEMEM_H
+
+#include <asm/memory.h>
+
+#define MAX_PHYSADDR_BITS	32
+#define MAX_PHYSMEM_BITS	32
+#define SECTION_SIZE_BITS	NODE_MEM_SIZE_BITS
+
+#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
new file mode 100644
index 0000000..2b41ebb
--- /dev/null
+++ b/arch/arm/include/asm/spinlock.h
@@ -0,0 +1,224 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#if __LINUX_ARM_ARCH__ < 6
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+/*
+ * ARMv6 Spin-locking.
+ *
+ * We exclusively read the old value.  If it is zero, we may have
+ * won the lock, so we try exclusively storing it.  A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
+ *
+ * Unlocked value: 0
+ * Locked value: 1
+ */
+
+#define __raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
+"	strexeq	%0, %2, [%1]\n"
+"	teqeq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "cc");
+
+	smp_mb();
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	smp_mb();
+
+	__asm__ __volatile__(
+"	str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev"
+#endif
+	:
+	: "r" (&lock->lock), "r" (0)
+	: "cc");
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
+"	strexeq	%0, %2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "cc");
+
+	smp_mb();
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	smp_mb();
+
+	__asm__ __volatile__(
+	"str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev\n"
+#endif
+	:
+	: "r" (&rw->lock), "r" (0)
+	: "cc");
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x)		((x)->lock == 0)
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	adds	%0, %0, #1\n"
+"	strexpl	%1, %0, [%2]\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfemi\n"
+#endif
+"	rsbpls	%0, %1, #0\n"
+"	bmi	1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+
+	smp_mb();
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, #1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+#ifdef CONFIG_CPU_32v6K
+"\n	cmp	%0, #0\n"
+"	mcreq   p15, 0, %0, c7, c10, 4\n"
+"	seveq"
+#endif
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2 = 1;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	adds	%0, %0, #1\n"
+"	strexpl	%1, %0, [%2]\n"
+	: "=&r" (tmp), "+r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+
+	smp_mb();
+	return tmp2 == 0;
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
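The locking protocol above is simply: exclusively load the word, and if it is 0, try to exclusively store 1, retrying until the store succeeds; a barrier follows the acquire and precedes the release. A rough userspace analogue of the same protocol using GCC's __atomic builtins in place of ldrex/strex (illustrative only; the type and function names are mine, and this is not how the kernel builds its spinlocks):

typedef struct { unsigned int lock; } toy_spinlock_t;	/* 0 = unlocked, 1 = locked */

static void toy_spin_lock(toy_spinlock_t *l)
{
	unsigned int expected;

	do {
		expected = 0;
		/* like ldrex/strexeq: succeed only if the old value was 0 */
	} while (!__atomic_compare_exchange_n(&l->lock, &expected, 1, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
	/* __ATOMIC_ACQUIRE stands in for the smp_mb() after taking the lock */
}

static void toy_spin_unlock(toy_spinlock_t *l)
{
	/* the release ordering stands in for the smp_mb() before the plain store of 0 */
	__atomic_store_n(&l->lock, 0, __ATOMIC_RELEASE);
}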
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
new file mode 100644
index 0000000..43e83f6
--- /dev/null
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
diff --git a/arch/arm/include/asm/stat.h b/arch/arm/include/asm/stat.h
new file mode 100644
index 0000000..42c0c13
--- /dev/null
+++ b/arch/arm/include/asm/stat.h
@@ -0,0 +1,87 @@
+#ifndef _ASMARM_STAT_H
+#define _ASMARM_STAT_H
+
+struct __old_kernel_stat {
+	unsigned short st_dev;
+	unsigned short st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned long  st_size;
+	unsigned long  st_atime;
+	unsigned long  st_mtime;
+	unsigned long  st_ctime;
+};
+
+#define STAT_HAVE_NSEC 
+
+struct stat {
+#if defined(__ARMEB__)
+	unsigned short st_dev;
+	unsigned short __pad1;
+#else
+	unsigned long  st_dev;
+#endif
+	unsigned long  st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+#if defined(__ARMEB__)
+	unsigned short st_rdev;
+	unsigned short __pad2;
+#else
+	unsigned long  st_rdev;
+#endif
+	unsigned long  st_size;
+	unsigned long  st_blksize;
+	unsigned long  st_blocks;
+	unsigned long  st_atime;
+	unsigned long  st_atime_nsec;
+	unsigned long  st_mtime;
+	unsigned long  st_mtime_nsec;
+	unsigned long  st_ctime;
+	unsigned long  st_ctime_nsec;
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ * Note: the kernel zeroes the padded regions because glibc might read them
+ * in the hope that the kernel has stretched to using larger sizes.
+ */
+struct stat64 {
+	unsigned long long	st_dev;
+	unsigned char   __pad0[4];
+
+#define STAT64_HAS_BROKEN_ST_INO	1
+	unsigned long	__st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	unsigned long	st_uid;
+	unsigned long	st_gid;
+
+	unsigned long long	st_rdev;
+	unsigned char   __pad3[4];
+
+	long long	st_size;
+	unsigned long	st_blksize;
+	unsigned long long st_blocks;	/* Number 512-byte blocks allocated. */
+
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;
+
+	unsigned long long	st_ino;
+};
+
+#endif
diff --git a/arch/arm/include/asm/statfs.h b/arch/arm/include/asm/statfs.h
new file mode 100644
index 0000000..a02e6a8
--- /dev/null
+++ b/arch/arm/include/asm/statfs.h
@@ -0,0 +1,42 @@
+#ifndef _ASMARM_STATFS_H
+#define _ASMARM_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+typedef __kernel_fsid_t	fsid_t;
+#endif
+
+struct statfs {
+	__u32 f_type;
+	__u32 f_bsize;
+	__u32 f_blocks;
+	__u32 f_bfree;
+	__u32 f_bavail;
+	__u32 f_files;
+	__u32 f_ffree;
+	__kernel_fsid_t f_fsid;
+	__u32 f_namelen;
+	__u32 f_frsize;
+	__u32 f_spare[5];
+};
+
+/*
+ * With EABI there are 4 bytes of padding added to this structure.
+ * Let's pack it so the padding goes away to simplify dual ABI support.
+ * Note that user space does NOT have to pack this structure.
+ */
+struct statfs64 {
+	__u32 f_type;
+	__u32 f_bsize;
+	__u64 f_blocks;
+	__u64 f_bfree;
+	__u64 f_bavail;
+	__u64 f_files;
+	__u64 f_ffree;
+	__kernel_fsid_t f_fsid;
+	__u32 f_namelen;
+	__u32 f_frsize;
+	__u32 f_spare[5];
+} __attribute__ ((packed,aligned(4)));
+
+#endif
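The effect of the packed,aligned(4) attribute can be checked at compile time. The arithmetic below is mine, not from the patch: 4 x __u32 + 5 x __u64 + the 8-byte f_fsid + 5 x __u32 spare gives 84 bytes, which EABI would otherwise round up to 88 because __u64 forces 8-byte struct alignment.

#include <linux/types.h>
#include <asm/statfs.h>

/* Fails to compile if the packed layout ever drifts from the 84-byte OABI size. */
typedef char statfs64_is_84_bytes[sizeof(struct statfs64) == 84 ? 1 : -1];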
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
new file mode 100644
index 0000000..e50c4a3
--- /dev/null
+++ b/arch/arm/include/asm/string.h
@@ -0,0 +1,50 @@
+#ifndef __ASM_ARM_STRING_H
+#define __ASM_ARM_STRING_H
+
+/*
+ * We don't do inline string functions, since the
+ * optimised inline asm versions are not small.
+ */
+
+#define __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char * s, int c);
+
+#define __HAVE_ARCH_STRCHR
+extern char * strchr(const char * s, int c);
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMZERO
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, __kernel_size_t);
+
+extern void __memzero(void *ptr, __kernel_size_t n);
+
+#define memset(p,v,n)							\
+	({								\
+	 	void *__p = (p); size_t __n = n;			\
+		if ((__n) != 0) {					\
+			if (__builtin_constant_p((v)) && (v) == 0)	\
+				__memzero((__p),(__n));			\
+			else						\
+				memset((__p),(v),(__n));		\
+		}							\
+		(__p);							\
+	})
+
+#define memzero(p,n) 							\
+	({ 								\
+	 	void *__p = (p); size_t __n = n;			\
+	 	if ((__n) != 0) 					\
+	 		__memzero((__p),(__n)); 			\
+	 	(__p); 							\
+	 })
+
+#endif
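The memset() wrapper above is a statement expression that dispatches a compile-time-constant zero fill to __memzero() and everything else to the real memset(), while still yielding the destination pointer. A hedged usage sketch (the function and buffer are hypothetical):

static void example_clear(void)
{
	char buf[64];

	memset(buf, 0, sizeof(buf));	/* constant 0: the macro routes this to __memzero() */
	memset(buf, 0xaa, 16);		/* non-zero fill: falls through to the real memset() */
	memzero(buf, sizeof(buf));	/* explicit helper, equivalent to the first call */
}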
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
index 0000000..cf0d0bd
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,4 @@
+#ifndef _ASMARM_SUSPEND_H
+#define _ASMARM_SUSPEND_H
+
+#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
new file mode 100644
index 0000000..514af79
--- /dev/null
+++ b/arch/arm/include/asm/system.h
@@ -0,0 +1,388 @@
+#ifndef __ASM_ARM_SYSTEM_H
+#define __ASM_ARM_SYSTEM_H
+
+#ifdef __KERNEL__
+
+#include <asm/memory.h>
+
+#define CPU_ARCH_UNKNOWN	0
+#define CPU_ARCH_ARMv3		1
+#define CPU_ARCH_ARMv4		2
+#define CPU_ARCH_ARMv4T		3
+#define CPU_ARCH_ARMv5		4
+#define CPU_ARCH_ARMv5T		5
+#define CPU_ARCH_ARMv5TE	6
+#define CPU_ARCH_ARMv5TEJ	7
+#define CPU_ARCH_ARMv6		8
+#define CPU_ARCH_ARMv7		9
+
+/*
+ * CR1 bits (CP#15 CR1)
+ */
+#define CR_M	(1 << 0)	/* MMU enable				*/
+#define CR_A	(1 << 1)	/* Alignment abort enable		*/
+#define CR_C	(1 << 2)	/* Dcache enable			*/
+#define CR_W	(1 << 3)	/* Write buffer enable			*/
+#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
+#define CR_D	(1 << 5)	/* 32-bit data address range		*/
+#define CR_L	(1 << 6)	/* Implementation defined		*/
+#define CR_B	(1 << 7)	/* Big endian				*/
+#define CR_S	(1 << 8)	/* System MMU protection		*/
+#define CR_R	(1 << 9)	/* ROM MMU protection			*/
+#define CR_F	(1 << 10)	/* Implementation defined		*/
+#define CR_Z	(1 << 11)	/* Implementation defined		*/
+#define CR_I	(1 << 12)	/* Icache enable			*/
+#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
+#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
+#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
+#define CR_DT	(1 << 16)
+#define CR_IT	(1 << 18)
+#define CR_ST	(1 << 19)
+#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
+#define CR_U	(1 << 22)	/* Unaligned access operation		*/
+#define CR_XP	(1 << 23)	/* Extended page tables			*/
+#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
+
+#define CPUID_ID	0
+#define CPUID_CACHETYPE	1
+#define CPUID_TCM	2
+#define CPUID_TLBTYPE	3
+
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences.  Apparently we can't trust
+ * the compiler from one version to another so a bit of paranoia won't hurt.
+ * This string is meant to be concatenated with the inline asm string and
+ * will cause compilation to stop on mismatch.
+ * (for details, see gcc PR 15089)
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/irqflags.h>
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg)							\
+	({								\
+		unsigned int __val;					\
+		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val)					\
+		    :							\
+		    : "cc");						\
+		__val;							\
+	})
+#else
+extern unsigned int processor_id;
+#define read_cpuid(reg) (processor_id)
+#endif
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant.  Use this function to read the CPU ID
+ * rather than reading processor_id or calling read_cpuid() directly.
+ */
+static inline unsigned int read_cpuid_id(void) __attribute_const__;
+
+static inline unsigned int read_cpuid_id(void)
+{
+	return read_cpuid(CPUID_ID);
+}
+
+#define __exception	__attribute__((section(".exception.text")))
+
+struct thread_info;
+struct task_struct;
+
+/* information about the system we're running on */
+extern unsigned int system_rev;
+extern unsigned int system_serial_low;
+extern unsigned int system_serial_high;
+extern unsigned int mem_fclk_21285;
+
+struct pt_regs;
+
+void die(const char *msg, struct pt_regs *regs, int err)
+		__attribute__((noreturn));
+
+struct siginfo;
+void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+		unsigned long err, unsigned long trap);
+
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+				       struct pt_regs *),
+		     int sig, const char *name);
+
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
+
+extern int cpu_architecture(void);
+extern void cpu_init(void);
+
+void arm_machine_restart(char mode);
+extern void (*arm_pm_restart)(char str);
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA.  For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3()	0
+#else
+static inline int cpu_is_xsc3(void)
+{
+	extern unsigned int processor_id;
+
+	if ((processor_id & 0xffffe000) == 0x69056000)
+		return 1;
+
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define	cpu_is_xscale()	0
+#else
+#define	cpu_is_xscale()	1
+#endif
+
+#define UDBG_UNDEFINED	(1 << 0)
+#define UDBG_SYSCALL	(1 << 1)
+#define UDBG_BADABORT	(1 << 2)
+#define UDBG_SEGV	(1 << 3)
+#define UDBG_BUS	(1 << 4)
+
+extern unsigned int user_debug;
+
+#if __LINUX_ARM_ARCH__ >= 4
+#define vectors_high()	(cr_alignment & CR_V)
+#else
+#define vectors_high()	(0)
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+				    : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+				    : : "r" (0) : "memory")
+#else
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifndef CONFIG_SMP
+#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#else
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
+#endif
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
+extern unsigned long cr_alignment;	/* defined in entry-armv.S */
+
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+#ifndef CONFIG_SMP
+extern void adjust_cr(unsigned long mask, unsigned long set);
+#endif
+
+#define CPACC_FULL(n)		(3 << (n * 2))
+#define CPACC_SVC(n)		(1 << (n * 2))
+#define CPACC_DISABLE(n)	(0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+	  : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+	  : : "r" (val) : "cc");
+	isb();
+}
+
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next';
+ * `prev' will never be the same as `next'.  schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
+
+#define switch_to(prev,next,last)					\
+do {									\
+	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+} while (0)
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+/*
+ * On the StrongARM, "swp" is terminally broken since it bypasses the
+ * cache totally.  This means that the cache becomes inconsistent, and,
+ * since we use normal loads/stores as well, this is really bad.
+ * Typically, this causes oopsen in filp_close, but could have other,
+ * more disastrous effects.  There are two work-arounds:
+ *  1. Disable interrupts and emulate the atomic swap
+ *  2. Clean the cache, perform atomic swap, flush the cache
+ *
+ * We choose (1) since it's the "easiest" to achieve here and is not
+ * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
+ */
+#define swp_is_buggy
+#endif
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	extern void __bad_xchg(volatile void *, int);
+	unsigned long ret;
+#ifdef swp_is_buggy
+	unsigned long flags;
+#endif
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int tmp;
+#endif
+
+	switch (size) {
+#if __LINUX_ARM_ARCH__ >= 6
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"1:	ldrexb	%0, [%3]\n"
+		"	strexb	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"1:	ldrex	%0, [%3]\n"
+		"	strex	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+	case 1:
+		raw_local_irq_save(flags);
+		ret = *(volatile unsigned char *)ptr;
+		*(volatile unsigned char *)ptr = x;
+		raw_local_irq_restore(flags);
+		break;
+
+	case 4:
+		raw_local_irq_save(flags);
+		ret = *(volatile unsigned long *)ptr;
+		*(volatile unsigned long *)ptr = x;
+		raw_local_irq_restore(flags);
+		break;
+#else
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"	swpb	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"	swp	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#endif
+	default:
+		__bad_xchg(ptr, size), ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+extern void disable_hlt(void);
+extern void enable_hlt(void);
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)				  	       \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif
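xchg() above atomically swaps a new value into *ptr and returns the old one, via ldrex/strex on ARMv6+, and via swp (or an IRQ-disabled copy on broken StrongARMs) otherwise. That is exactly the primitive a test-and-set lock needs. A hedged sketch built only on xchg() and the barriers from this header; the tas_* names are mine and cpu_relax() is assumed from elsewhere in the tree:

static unsigned long tas_lock;		/* 0 = free, 1 = taken */

static void tas_acquire(void)
{
	while (xchg(&tas_lock, 1) != 0)
		cpu_relax();		/* the old value was 1: someone else holds it */
	smp_mb();			/* keep the critical section after the acquire */
}

static void tas_release(void)
{
	smp_mb();			/* order the critical section before the release */
	xchg(&tas_lock, 0);
}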
diff --git a/arch/arm/include/asm/termbits.h b/arch/arm/include/asm/termbits.h
new file mode 100644
index 0000000..f784d11
--- /dev/null
+++ b/arch/arm/include/asm/termbits.h
@@ -0,0 +1,197 @@
+#ifndef __ASM_ARM_TERMBITS_H
+#define __ASM_ARM_TERMBITS_H
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+};
+
+struct termios2 {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+struct ktermios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IUCLC	0001000
+#define IXON	0002000
+#define IXANY	0004000
+#define IXOFF	0010000
+#define IMAXBEL	0020000
+#define IUTF8	0040000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define OLCUC	0000002
+#define ONLCR	0000004
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+#define OFILL	0000100
+#define OFDEL	0000200
+#define NLDLY	0000400
+#define   NL0	0000000
+#define   NL1	0000400
+#define CRDLY	0003000
+#define   CR0	0000000
+#define   CR1	0001000
+#define   CR2	0002000
+#define   CR3	0003000
+#define TABDLY	0014000
+#define   TAB0	0000000
+#define   TAB1	0004000
+#define   TAB2	0010000
+#define   TAB3	0014000
+#define   XTABS	0014000
+#define BSDLY	0020000
+#define   BS0	0000000
+#define   BS1	0020000
+#define VTDLY	0040000
+#define   VT0	0000000
+#define   VT1	0040000
+#define FFDLY	0100000
+#define   FF0	0000000
+#define   FF1	0100000
+
+/* c_cflag bit meaning */
+#define CBAUD	0010017
+#define  B0	0000000		/* hang up */
+#define  B50	0000001
+#define  B75	0000002
+#define  B110	0000003
+#define  B134	0000004
+#define  B150	0000005
+#define  B200	0000006
+#define  B300	0000007
+#define  B600	0000010
+#define  B1200	0000011
+#define  B1800	0000012
+#define  B2400	0000013
+#define  B4800	0000014
+#define  B9600	0000015
+#define  B19200	0000016
+#define  B38400	0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE	0000060
+#define   CS5	0000000
+#define   CS6	0000020
+#define   CS7	0000040
+#define   CS8	0000060
+#define CSTOPB	0000100
+#define CREAD	0000200
+#define PARENB	0000400
+#define PARODD	0001000
+#define HUPCL	0002000
+#define CLOCAL	0004000
+#define CBAUDEX 0010000
+#define    BOTHER 0010000
+#define    B57600 0010001
+#define   B115200 0010002
+#define   B230400 0010003
+#define   B460800 0010004
+#define   B500000 0010005
+#define   B576000 0010006
+#define   B921600 0010007
+#define  B1000000 0010010
+#define  B1152000 0010011
+#define  B1500000 0010012
+#define  B2000000 0010013
+#define  B2500000 0010014
+#define  B3000000 0010015
+#define  B3500000 0010016
+#define  B4000000 0010017
+#define CIBAUD	  002003600000		/* input baud rate */
+#define CMSPAR    010000000000		/* mark or space (stick) parity */
+#define CRTSCTS	  020000000000		/* flow control */
+
+#define IBSHIFT	   16
+
+/* c_lflag bits */
+#define ISIG	0000001
+#define ICANON	0000002
+#define XCASE	0000004
+#define ECHO	0000010
+#define ECHOE	0000020
+#define ECHOK	0000040
+#define ECHONL	0000100
+#define NOFLSH	0000200
+#define TOSTOP	0000400
+#define ECHOCTL	0001000
+#define ECHOPRT	0002000
+#define ECHOKE	0004000
+#define FLUSHO	0010000
+#define PENDIN	0040000
+#define IEXTEN	0100000
+
+/* tcflow() and TCXONC use these */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* tcflush() and TCFLSH use these */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* tcsetattr uses these */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
+
+#endif	/* __ASM_ARM_TERMBITS_H */
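struct termios2 plus BOTHER is what makes arbitrary baud rates expressible: CBAUD is replaced by BOTHER and the literal rate goes in c_ispeed/c_ospeed. A hedged userspace sketch; it assumes TCGETS2/TCSETS2 are available from <asm/ioctls.h>, which is not part of this hunk:

#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, BOTHER, CBAUD */
#include <asm/ioctls.h>		/* TCGETS2/TCSETS2 (assumed present) */

/* Request an arbitrary line rate on an already-open serial fd. */
static int set_custom_baud(int fd, unsigned int rate)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;		/* "take c_ispeed/c_ospeed literally" */
	tio.c_ispeed = rate;
	tio.c_ospeed = rate;
	return ioctl(fd, TCSETS2, &tio);
}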
diff --git a/arch/arm/include/asm/termios.h b/arch/arm/include/asm/termios.h
new file mode 100644
index 0000000..293e3f1
--- /dev/null
+++ b/arch/arm/include/asm/termios.h
@@ -0,0 +1,92 @@
+#ifndef __ASM_ARM_TERMIOS_H
+#define __ASM_ARM_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+#ifdef __KERNEL__
+/*	intr=^C		quit=^|		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+#ifdef __KERNEL__
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) {		\
+	unsigned short __tmp;					\
+	get_user(__tmp,&(termio)->x);				\
+	*(unsigned short *) &(termios)->x = __tmp;		\
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+	put_user((termios)->c_iflag, &(termio)->c_iflag); \
+	put_user((termios)->c_oflag, &(termio)->c_oflag); \
+	put_user((termios)->c_cflag, &(termio)->c_cflag); \
+	put_user((termios)->c_lflag, &(termio)->c_lflag); \
+	put_user((termios)->c_line,  &(termio)->c_line); \
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* __KERNEL__ */
+
+#endif	/* __ASM_ARM_TERMIOS_H */
diff --git a/arch/arm/include/asm/therm.h b/arch/arm/include/asm/therm.h
new file mode 100644
index 0000000..f002f01
--- /dev/null
+++ b/arch/arm/include/asm/therm.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/include/asm/therm.h: Definitions for Dallas Semiconductor
+ *  DS1620 thermometer driver (as used in the Rebel.com NetWinder)
+ */
+#ifndef __ASM_THERM_H
+#define __ASM_THERM_H
+
+/* ioctl numbers for /dev/therm */
+#define CMD_SET_THERMOSTATE	0x53
+#define CMD_GET_THERMOSTATE	0x54
+#define CMD_GET_STATUS		0x56
+#define CMD_GET_TEMPERATURE	0x57
+#define CMD_SET_THERMOSTATE2	0x58
+#define CMD_GET_THERMOSTATE2	0x59
+#define CMD_GET_TEMPERATURE2	0x5a
+#define CMD_GET_FAN		0x5b
+#define CMD_SET_FAN		0x5c
+
+#define FAN_OFF			0
+#define FAN_ON			1
+#define FAN_ALWAYS_ON		2
+
+struct therm {
+	int hi;
+	int lo;
+};
+
+#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
new file mode 100644
index 0000000..e56fa48
--- /dev/null
+++ b/arch/arm/include/asm/thread_info.h
@@ -0,0 +1,153 @@
+/*
+ *  arch/arm/include/asm/thread_info.h
+ *
+ *  Copyright (C) 2002 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_THREAD_INFO_H
+#define __ASM_ARM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/fpstate.h>
+
+#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE		8192
+#define THREAD_START_SP		(THREAD_SIZE - 8)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct exec_domain;
+
+#include <asm/types.h>
+#include <asm/domain.h>
+
+typedef unsigned long mm_segment_t;
+
+struct cpu_context_save {
+	__u32	r4;
+	__u32	r5;
+	__u32	r6;
+	__u32	r7;
+	__u32	r8;
+	__u32	r9;
+	__u32	sl;
+	__u32	fp;
+	__u32	sp;
+	__u32	pc;
+	__u32	extra[2];		/* Xscale 'acc' register, etc */
+};
+
+/*
+ * low level task data that entry.S needs immediate access to.
+ * __switch_to() assumes cpu_context follows immediately after cpu_domain.
+ */
+struct thread_info {
+	unsigned long		flags;		/* low level flags */
+	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	mm_segment_t		addr_limit;	/* address limit */
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	__u32			cpu;		/* cpu */
+	__u32			cpu_domain;	/* cpu domain */
+	struct cpu_context_save	cpu_context;	/* cpu context */
+	__u32			syscall;	/* syscall number */
+	__u8			used_cp[16];	/* thread used copro */
+	unsigned long		tp_value;
+	struct crunch_state	crunchstate;
+	union fp_state		fpstate __attribute__((aligned(8)));
+	union vfp_state		vfpstate;
+#ifdef CONFIG_ARM_THUMBEE
+	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
+#endif
+	struct restart_block	restart_block;
+};
+
+#define INIT_THREAD_INFO(tsk)						\
+{									\
+	.task		= &tsk,						\
+	.exec_domain	= &default_exec_domain,				\
+	.flags		= 0,						\
+	.preempt_count	= 1,						\
+	.addr_limit	= KERNEL_DS,					\
+	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
+			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
+			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
+	.restart_block	= {						\
+		.fn	= do_no_restart_syscall,			\
+	},								\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+/*
+ * how to get the thread information struct from C
+ */
+static inline struct thread_info *current_thread_info(void) __attribute_const__;
+
+static inline struct thread_info *current_thread_info(void)
+{
+	register unsigned long sp asm ("sp");
+	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#define thread_saved_pc(tsk)	\
+	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
+#define thread_saved_fp(tsk)	\
+	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+
+extern void crunch_task_disable(struct thread_info *);
+extern void crunch_task_copy(struct thread_info *, void *);
+extern void crunch_task_restore(struct thread_info *, void *);
+extern void crunch_task_release(struct thread_info *);
+
+extern void iwmmxt_task_disable(struct thread_info *);
+extern void iwmmxt_task_copy(struct thread_info *, void *);
+extern void iwmmxt_task_restore(struct thread_info *, void *);
+extern void iwmmxt_task_release(struct thread_info *);
+extern void iwmmxt_task_switch(struct thread_info *);
+
+#endif
+
+/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring.  See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE	0x40000000
+
+/*
+ * thread information flags:
+ *  TIF_SYSCALL_TRACE	- syscall trace active
+ *  TIF_SIGPENDING	- signal pending
+ *  TIF_NEED_RESCHED	- rescheduling necessary
+ *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
+ *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
+ */
+#define TIF_SIGPENDING		0
+#define TIF_NEED_RESCHED	1
+#define TIF_SYSCALL_TRACE	8
+#define TIF_POLLING_NRFLAG	16
+#define TIF_USING_IWMMXT	17
+#define TIF_MEMDIE		18
+#define TIF_FREEZE		19
+
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
+#define _TIF_FREEZE		(1 << TIF_FREEZE)
+
+/*
+ * Change these and you break ASM code in entry-common.S
+ */
+#define _TIF_WORK_MASK		0x000000ff
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_THREAD_INFO_H */
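current_thread_info() works because every kernel stack is THREAD_SIZE bytes, THREAD_SIZE-aligned, with the thread_info at its base, so masking any in-stack address with ~(THREAD_SIZE - 1) recovers it. A small arithmetic illustration with a made-up stack pointer value:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL		/* as defined above */

int main(void)
{
	/* pretend sp currently points somewhere inside an 8KB-aligned kernel stack */
	uintptr_t sp   = 0xc1a36e40;
	uintptr_t base = sp & ~(THREAD_SIZE - 1);

	/* prints 0xc1a36000: the start of the stack, where thread_info lives */
	printf("sp=%#lx -> thread_info at %#lx\n",
	       (unsigned long)sp, (unsigned long)base);
	return 0;
}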
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
new file mode 100644
index 0000000..f27379d
--- /dev/null
+++ b/arch/arm/include/asm/thread_notify.h
@@ -0,0 +1,48 @@
+/*
+ *  arch/arm/include/asm/thread_notify.h
+ *
+ *  Copyright (C) 2006 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASMARM_THREAD_NOTIFY_H
+#define ASMARM_THREAD_NOTIFY_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+#include <asm/thread_info.h>
+
+static inline int thread_register_notifier(struct notifier_block *n)
+{
+	extern struct atomic_notifier_head thread_notify_head;
+	return atomic_notifier_chain_register(&thread_notify_head, n);
+}
+
+static inline void thread_unregister_notifier(struct notifier_block *n)
+{
+	extern struct atomic_notifier_head thread_notify_head;
+	atomic_notifier_chain_unregister(&thread_notify_head, n);
+}
+
+static inline void thread_notify(unsigned long rc, struct thread_info *thread)
+{
+	extern struct atomic_notifier_head thread_notify_head;
+	atomic_notifier_call_chain(&thread_notify_head, rc, thread);
+}
+
+#endif
+
+/*
+ * These are the reason codes for the thread notifier.
+ */
+#define THREAD_NOTIFY_FLUSH	0
+#define THREAD_NOTIFY_RELEASE	1
+#define THREAD_NOTIFY_SWITCH	2
+
+#endif
+#endif
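Coprocessor state owners (VFP, iWMMXt, Crunch) hook this notifier chain to flush or release per-thread state at the right moments. A hedged sketch of a hypothetical client; the callback signature and NOTIFY_DONE come from <linux/notifier.h>:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/thread_notify.h>

static int my_thread_notify(struct notifier_block *self,
			    unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		pr_debug("new thread %p: reset lazily-switched state\n", thread);
		break;
	case THREAD_NOTIFY_RELEASE:
		pr_debug("thread %p exiting: drop ownership of its state\n", thread);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_thread_notifier = {
	.notifier_call	= my_thread_notify,
};

static int __init my_notifier_init(void)
{
	thread_register_notifier(&my_thread_notifier);
	return 0;
}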
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
new file mode 100644
index 0000000..e50e292
--- /dev/null
+++ b/arch/arm/include/asm/timex.h
@@ -0,0 +1,24 @@
+/*
+ *  arch/arm/include/asm/timex.h
+ *
+ *  Copyright (C) 1997,1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Architecture Specific TIME specifications
+ */
+#ifndef _ASMARM_TIMEX_H
+#define _ASMARM_TIMEX_H
+
+#include <asm/arch/timex.h>
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+	return 0;
+}
+
+#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
new file mode 100644
index 0000000..857f1df
--- /dev/null
+++ b/arch/arm/include/asm/tlb.h
@@ -0,0 +1,94 @@
+/*
+ *  arch/arm/include/asm/tlb.h
+ *
+ *  Copyright (C) 2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Experimentation shows that on a StrongARM, it appears to be faster
+ *  to use the "invalidate whole tlb" rather than "invalidate single
+ *  tlb" for this.
+ *
+ *  This appears true for both the process fork+exit case, as well as
+ *  the munmap-large-area case.
+ */
+#ifndef __ASMARM_TLB_H
+#define __ASMARM_TLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_MMU
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#else /* !CONFIG_MMU */
+
+#include <asm/pgalloc.h>
+
+/*
+ * TLB handling.  This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+	struct mm_struct	*mm;
+	unsigned int		fullmm;
+};
+
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+	tlb->mm = mm;
+	tlb->fullmm = full_mm_flush;
+
+	return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (tlb->fullmm)
+		flush_tlb_mm(tlb->mm);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
+}
+
+#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)
+
+/*
+ * For TLB vma handling, we can optimise these away when we're doing
+ * a full MM flush.  When we're doing a munmap, the vmas are adjusted
+ * to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+}
+
+#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep)		pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp)		pmd_free((tlb)->mm, pmdp)
+
+#define tlb_migrate_finish(mm)		do { } while (0)
+
+#endif /* CONFIG_MMU */
+#endif
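The mmu_gather API above is driven by the generic unmap path roughly in the sequence below. This is a hedged sketch of the calling convention only, with a hypothetical function name and parameters, not a real call site:

#include <asm/tlb.h>

static void unmap_region_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				pte_t *ptep, unsigned long addr, struct page *page)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* 0: partial unmap, not a full-mm teardown */

	tlb_start_vma(tlb, vma);		/* flushes the cache range unless fullmm */
	tlb_remove_tlb_entry(tlb, ptep, addr);	/* no-op on ARM, as defined above */
	tlb_remove_page(tlb, page);		/* frees the page and its swap cache entry */
	tlb_end_vma(tlb, vma);			/* flushes the TLB range unless fullmm */
	tlb_finish_mmu(tlb, start, end);	/* full-mm TLB flush if needed, drops mmu_gathers */
}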
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
new file mode 100644
index 0000000..0d0d40f
--- /dev/null
+++ b/arch/arm/include/asm/tlbflush.h
@@ -0,0 +1,500 @@
+/*
+ *  arch/arm/include/asm/tlbflush.h
+ *
+ *  Copyright (C) 1999-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_TLBFLUSH_H
+#define _ASMARM_TLBFLUSH_H
+
+
+#ifndef CONFIG_MMU
+
+#define tlb_flush(tlb)	((void) tlb)
+
+#else /* CONFIG_MMU */
+
+#include <asm/glue.h>
+
+#define TLB_V3_PAGE	(1 << 0)
+#define TLB_V4_U_PAGE	(1 << 1)
+#define TLB_V4_D_PAGE	(1 << 2)
+#define TLB_V4_I_PAGE	(1 << 3)
+#define TLB_V6_U_PAGE	(1 << 4)
+#define TLB_V6_D_PAGE	(1 << 5)
+#define TLB_V6_I_PAGE	(1 << 6)
+
+#define TLB_V3_FULL	(1 << 8)
+#define TLB_V4_U_FULL	(1 << 9)
+#define TLB_V4_D_FULL	(1 << 10)
+#define TLB_V4_I_FULL	(1 << 11)
+#define TLB_V6_U_FULL	(1 << 12)
+#define TLB_V6_D_FULL	(1 << 13)
+#define TLB_V6_I_FULL	(1 << 14)
+
+#define TLB_V6_U_ASID	(1 << 16)
+#define TLB_V6_D_ASID	(1 << 17)
+#define TLB_V6_I_ASID	(1 << 18)
+
+#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
+#define TLB_DCLEAN	(1 << 30)
+#define TLB_WB		(1 << 31)
+
+/*
+ *	MMU TLB Model
+ *	=============
+ *
+ *	We have the following to choose from:
+ *	  v3    - ARMv3
+ *	  v4    - ARMv4 without write buffer
+ *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
+ *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
+ *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
+ *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
+ */
+#undef _TLB
+#undef MULTI_TLB
+
+#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V3
+# define v3_possible_flags	v3_tlb_flags
+# define v3_always_flags	v3_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v3
+# endif
+#else
+# define v3_possible_flags	0
+# define v3_always_flags	(-1UL)
+#endif
+
+#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WT
+# define v4_possible_flags	v4_tlb_flags
+# define v4_always_flags	v4_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v4
+# endif
+#else
+# define v4_possible_flags	0
+# define v4_always_flags	(-1UL)
+#endif
+
+#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
+			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
+			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WBI
+# define v4wbi_possible_flags	v4wbi_tlb_flags
+# define v4wbi_always_flags	v4wbi_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v4wbi
+# endif
+#else
+# define v4wbi_possible_flags	0
+# define v4wbi_always_flags	(-1UL)
+#endif
+
+#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
+			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
+			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_FEROCEON
+# define fr_possible_flags	fr_tlb_flags
+# define fr_always_flags	fr_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v4wbi
+# endif
+#else
+# define fr_possible_flags	0
+# define fr_always_flags	(-1UL)
+#endif
+
+#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
+			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
+			 TLB_V4_D_PAGE)
+
+#ifdef CONFIG_CPU_TLB_V4WB
+# define v4wb_possible_flags	v4wb_tlb_flags
+# define v4wb_always_flags	v4wb_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v4wb
+# endif
+#else
+# define v4wb_possible_flags	0
+# define v4wb_always_flags	(-1UL)
+#endif
+
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
+			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
+			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
+			 TLB_V6_I_ASID | TLB_V6_D_ASID)
+
+#ifdef CONFIG_CPU_TLB_V6
+# define v6wbi_possible_flags	v6wbi_tlb_flags
+# define v6wbi_always_flags	v6wbi_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v6wbi
+# endif
+#else
+# define v6wbi_possible_flags	0
+# define v6wbi_always_flags	(-1UL)
+#endif
+
+#ifdef CONFIG_CPU_TLB_V7
+# define v7wbi_possible_flags	v6wbi_tlb_flags
+# define v7wbi_always_flags	v6wbi_tlb_flags
+# ifdef _TLB
+#  define MULTI_TLB 1
+# else
+#  define _TLB v7wbi
+# endif
+#else
+# define v7wbi_possible_flags	0
+# define v7wbi_always_flags	(-1UL)
+#endif
+
+#ifndef _TLB
+#error Unknown TLB model
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+struct cpu_tlb_fns {
+	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+	void (*flush_kern_range)(unsigned long, unsigned long);
+	unsigned long tlb_flags;
+};
+
+/*
+ * Select the calling method
+ */
+#ifdef MULTI_TLB
+
+#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
+#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range
+
+#else
+
+#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)
+
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+
+#endif
+
+extern struct cpu_tlb_fns cpu_tlb;
+
+#define __cpu_tlb_flags			cpu_tlb.tlb_flags
+
+/*
+ *	TLB Management
+ *	==============
+ *
+ *	The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ *	The TLB specific code is expected to perform whatever tests it
+ *	needs to determine if it should invalidate the TLB for each
+ *	call.  Start addresses are inclusive and end addresses are
+ *	exclusive; it is safe to round these addresses down.
+ *
+ *	flush_tlb_all()
+ *
+ *		Invalidate the entire TLB.
+ *
+ *	flush_tlb_mm(mm)
+ *
+ *		Invalidate all TLB entries in a particular address
+ *		space.
+ *		- mm	- mm_struct describing address space
+ *
+ *	flush_tlb_range(vma,start,end)
+ *
+ *		Invalidate a range of TLB entries in the specified
+ *		address range.
+ *		- vma	- vm_area_struct describing address range
+ *		- start - start address (may not be aligned)
+ *		- end	- end address (exclusive, may not be aligned)
+ *
+ *	flush_tlb_page(vma,vaddr)
+ *
+ *		Invalidate the specified page in the specified address space.
+ *		- vma	- vm_area_struct describing address space
+ *		- vaddr - virtual address (may not be aligned)
+ *
+ *	flush_kern_tlb_page(kaddr)
+ *
+ *		Invalidate the TLB entry for the specified page.  The address
+ *		will be in the kernel's virtual memory space.  Current uses
+ *		only require the D-TLB to be invalidated.
+ *		- kaddr - Kernel virtual memory address
+ */
+
+/*
+ * We optimise the code below by:
+ *  - building a set of TLB flags that might be set in __cpu_tlb_flags
+ *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
+ *  - if we're going to need __cpu_tlb_flags, access it once and only once
+ *
+ * This allows us to build optimal assembly for the single-CPU type case,
+ * and as close to optimal as possible given the compiler constraints
+ * for the multi-CPU case.  We could do better for the multi-CPU case
+ * if the compiler implemented the "%?" method, but this has been
+ * discontinued due to too many people getting it wrong.
+ */
+#define possible_tlb_flags	(v3_possible_flags | \
+				 v4_possible_flags | \
+				 v4wbi_possible_flags | \
+				 fr_possible_flags | \
+				 v4wb_possible_flags | \
+				 v6wbi_possible_flags)
+
+#define always_tlb_flags	(v3_always_flags & \
+				 v4_always_flags & \
+				 v4wbi_always_flags & \
+				 fr_always_flags & \
+				 v4wb_always_flags & \
+				 v6wbi_always_flags)
+
+#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
+
+static inline void local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	if (tlb_flag(TLB_V3_FULL))
+		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
+		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
+		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
+		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int zero = 0;
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+		if (tlb_flag(TLB_V3_FULL))
+			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
+		if (tlb_flag(TLB_V4_U_FULL))
+			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
+		if (tlb_flag(TLB_V4_D_FULL))
+			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
+		if (tlb_flag(TLB_V4_I_FULL))
+			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+	}
+
+	if (tlb_flag(TLB_V6_U_ASID))
+		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
+	if (tlb_flag(TLB_V6_D_ASID))
+		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
+	if (tlb_flag(TLB_V6_I_ASID))
+		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (tlb_flag(TLB_V3_PAGE))
+			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
+		if (tlb_flag(TLB_V4_U_PAGE))
+			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
+		if (tlb_flag(TLB_V4_D_PAGE))
+			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
+		if (tlb_flag(TLB_V4_I_PAGE))
+			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+	}
+
+	if (tlb_flag(TLB_V6_U_PAGE))
+		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
+	if (tlb_flag(TLB_V6_D_PAGE))
+		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
+	if (tlb_flag(TLB_V6_I_PAGE))
+		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	kaddr &= PAGE_MASK;
+
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	if (tlb_flag(TLB_V3_PAGE))
+		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V4_U_PAGE))
+		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V4_D_PAGE))
+		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V4_I_PAGE))
+		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
+		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	if (tlb_flag(TLB_V6_U_PAGE))
+		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V6_D_PAGE))
+		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
+	if (tlb_flag(TLB_V6_I_PAGE))
+		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
+}
+
+/*
+ *	flush_pmd_entry
+ *
+ *	Flush a PMD entry (word aligned, or double-word aligned) to
+ *	RAM if the TLB for the CPU we are running on requires this.
+ *	This is typically used when we are creating PMD entries.
+ *
+ *	clean_pmd_entry
+ *
+ *	Clean (but don't drain the write buffer) if the CPU requires
+ *	these operations.  This is typically used when we are removing
+ *	PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_DCLEAN))
+		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
+			: : "r" (pmd) : "cc");
+
+	if (tlb_flag(TLB_L2CLEAN_FR))
+		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
+			: : "r" (pmd) : "cc");
+
+	if (tlb_flag(TLB_WB))
+		dsb();
+}
+
+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_DCLEAN))
+		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
+			: : "r" (pmd) : "cc");
+
+	if (tlb_flag(TLB_L2CLEAN_FR))
+		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
+			: : "r" (pmd) : "cc");
+}
+
+#undef tlb_flag
+#undef always_tlb_flags
+#undef possible_tlb_flags
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+
+#ifndef CONFIG_SMP
+#define flush_tlb_all		local_flush_tlb_all
+#define flush_tlb_mm		local_flush_tlb_mm
+#define flush_tlb_page		local_flush_tlb_page
+#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
+#define flush_tlb_range		local_flush_tlb_range
+#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#else
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#endif
+
+/*
+ * If PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+
+#endif
+
+#endif /* CONFIG_MMU */
+
+#endif
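The possible/always flag pair is what lets tlb_flag() collapse at compile time: with a single TLB type configured, both masks are the same constant, every test folds to 0 or 1, and the dead mcr branches disappear. A stand-alone illustration of the pattern (my own names, not kernel code):

#define FEATURE_A	(1 << 0)
#define FEATURE_B	(1 << 1)

/* Single-implementation build: the two masks are the same constant. */
#define possible_flags	FEATURE_A
#define always_flags	FEATURE_A

static unsigned int runtime_flags;	/* read at run time, like __tlb_flag */

#define has_flag(f) \
	((always_flags & (f)) || (runtime_flags & possible_flags & (f)))

static int demo(void)
{
	if (has_flag(FEATURE_A))	/* folds to 1: branch always taken */
		return 1;
	if (has_flag(FEATURE_B))	/* folds to 0: branch removed entirely */
		return 2;
	return 0;
}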
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
new file mode 100644
index 0000000..accbd7c
--- /dev/null
+++ b/arch/arm/include/asm/topology.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_ARM_TOPOLOGY_H
+#define _ASM_ARM_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
new file mode 100644
index 0000000..aa399ae
--- /dev/null
+++ b/arch/arm/include/asm/traps.h
@@ -0,0 +1,29 @@
+#ifndef _ASMARM_TRAP_H
+#define _ASMARM_TRAP_H
+
+#include <linux/list.h>
+
+struct undef_hook {
+	struct list_head node;
+	u32 instr_mask;
+	u32 instr_val;
+	u32 cpsr_mask;
+	u32 cpsr_val;
+	int (*fn)(struct pt_regs *regs, unsigned int instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+void unregister_undef_hook(struct undef_hook *hook);
+
+static inline int in_exception_text(unsigned long ptr)
+{
+	extern char __exception_text_start[];
+	extern char __exception_text_end[];
+
+	return ptr >= (unsigned long)&__exception_text_start &&
+	       ptr < (unsigned long)&__exception_text_end;
+}
+
+extern void __init early_trap_init(void);
+
+#endif
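register_undef_hook() is how the VFP and iWMMXt support code claims specific undefined-instruction encodings. A hedged sketch of a hypothetical hook; the instruction encoding is made up, MODE_MASK/USR_MODE come from <asm/ptrace.h>, and the PC adjustment is an assumption for illustration:

#include <linux/init.h>
#include <linux/ptrace.h>
#include <asm/traps.h>

/* Returning 0 tells the trap code the instruction was handled. */
static int my_undef_handler(struct pt_regs *regs, unsigned int instr)
{
	/* emulate 'instr' using 'regs', then skip the trapped encoding
	 * (assuming the saved PC still points at it) */
	regs->ARM_pc += 4;
	return 0;
}

static struct undef_hook my_undef_hook = {
	.instr_mask	= 0xffffffff,	/* match one exact encoding */
	.instr_val	= 0xe7f000f0,	/* hypothetical instruction to trap */
	.cpsr_mask	= MODE_MASK,	/* only when taken from ... */
	.cpsr_val	= USR_MODE,	/* ... user mode */
	.fn		= my_undef_handler,
};

static int __init my_undef_init(void)
{
	register_undef_hook(&my_undef_hook);
	return 0;
}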
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644
index 0000000..345df01
--- /dev/null
+++ b/arch/arm/include/asm/types.h
@@ -0,0 +1,31 @@
+#ifndef __ASM_ARM_TYPES_H
+#define __ASM_ARM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#ifndef __ASSEMBLY__
+
+/* Dma addresses are 32-bits wide.  */
+
+typedef u32 dma_addr_t;
+typedef u32 dma64_addr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
+
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
new file mode 100644
index 0000000..d0f51ff
--- /dev/null
+++ b/arch/arm/include/asm/uaccess.h
@@ -0,0 +1,444 @@
+/*
+ *  arch/arm/include/asm/uaccess.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_UACCESS_H
+#define _ASMARM_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <asm/memory.h>
+#include <asm/domain.h>
+#include <asm/system.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+/*
+ * Note that this is actually 0x1,0000,0000
+ */
+#define KERNEL_DS	0x00000000
+#define get_ds()	(KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS		TASK_SIZE
+#define get_fs()	(current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+	current_thread_info()->addr_limit = fs;
+	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+}
+
+#define segment_eq(a,b)	((a) == (b))
+
+#define __addr_ok(addr) ({ \
+	unsigned long flag; \
+	__asm__("cmp %2, %0; movlo %0, #0" \
+		: "=&r" (flag) \
+		: "0" (current_thread_info()->addr_limit), "r" (addr) \
+		: "cc"); \
+	(flag == 0); })
+
+/* We use 33-bit arithmetic here... */
+#define __range_ok(addr,size) ({ \
+	unsigned long flag, roksum; \
+	__chk_user_ptr(addr);	\
+	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+		: "=&r" (flag), "=&r" (roksum) \
+		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
+		: "cc"); \
+	flag; })
+
+/*
+ * Single-value transfer routines.  They automatically use the right
+ * size if we just have the right pointer type.  Note that the functions
+ * which read from user space (*get_*) need to take care not to leak
+ * kernel data even if the calling code is buggy and fails to check
+ * the return value.  This means zeroing out the destination variable
+ * or buffer on error.  Normally this is done out of line by the
+ * fixup code, but there are a few places where it intrudes on the
+ * main code path.  When we only write to user space, there is no
+ * problem.
+ */
+extern int __get_user_1(void *);
+extern int __get_user_2(void *);
+extern int __get_user_4(void *);
+
+#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		"bl	__get_user_" #__s				\
+		: "=&r" (__e), "=r" (__r2)				\
+		: "0" (__p)						\
+		: __i, "cc")
+
+#define get_user(x,p)							\
+	({								\
+		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __r2 asm("r2");			\
+		register int __e asm("r0");				\
+		switch (sizeof(*(__p))) {				\
+		case 1:							\
+			__get_user_x(__r2, __p, __e, 1, "lr");		\
+	       		break;						\
+		case 2:							\
+			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			break;						\
+		case 4:							\
+	       		__get_user_x(__r2, __p, __e, 4, "lr");		\
+			break;						\
+		default: __e = __get_user_bad(); break;			\
+		}							\
+		x = (typeof(*(p))) __r2;				\
+		__e;							\
+	})
+
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s)					\
+	   __asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		"bl	__put_user_" #__s				\
+		: "=&r" (__e)						\
+		: "0" (__p), "r" (__r2)					\
+		: "ip", "lr", "cc")
+
+#define put_user(x,p)							\
+	({								\
+		register const typeof(*(p)) __r2 asm("r2") = (x);	\
+		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register int __e asm("r0");				\
+		switch (sizeof(*(__p))) {				\
+		case 1:							\
+			__put_user_x(__r2, __p, __e, 1);		\
+			break;						\
+		case 2:							\
+			__put_user_x(__r2, __p, __e, 2);		\
+			break;						\
+		case 4:							\
+			__put_user_x(__r2, __p, __e, 4);		\
+			break;						\
+		case 8:							\
+			__put_user_x(__r2, __p, __e, 8);		\
+			break;						\
+		default: __e = __put_user_bad(); break;			\
+		}							\
+		__e;							\
+	})
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS			KERNEL_DS
+
+#define segment_eq(a,b)		(1)
+#define __addr_ok(addr)		(1)
+#define __range_ok(addr,size)	(0)
+#define get_fs()		(KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p)	__get_user(x,p)
+#define put_user(x,p)	__put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success.  Note that these
+ * versions are void (ie, don't return a value as such).
+ */
+#define __get_user(x,ptr)						\
+({									\
+	long __gu_err = 0;						\
+	__get_user_err((x),(ptr),__gu_err);				\
+	__gu_err;							\
+})
+
+#define __get_user_error(x,ptr,err)					\
+({									\
+	__get_user_err((x),(ptr),err);					\
+	(void) 0;							\
+})
+
+#define __get_user_err(x,ptr,err)					\
+do {									\
+	unsigned long __gu_addr = (unsigned long)(ptr);			\
+	unsigned long __gu_val;						\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
+	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
+	case 4:	__get_user_asm_word(__gu_val,__gu_addr,err);	break;	\
+	default: (__gu_val) = __get_user_bad();				\
+	}								\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
+} while (0)
+
+#define __get_user_asm_byte(x,addr,err)				\
+	__asm__ __volatile__(					\
+	"1:	ldrbt	%1,[%2],#0\n"				\
+	"2:\n"							\
+	"	.section .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
+	"3:	mov	%0, %3\n"				\
+	"	mov	%1, #0\n"				\
+	"	b	2b\n"					\
+	"	.previous\n"					\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 3b\n"				\
+	"	.previous"					\
+	: "+r" (err), "=&r" (x)					\
+	: "r" (addr), "i" (-EFAULT)				\
+	: "cc")
+
+#ifndef __ARMEB__
+#define __get_user_asm_half(x,__gu_addr,err)			\
+({								\
+	unsigned long __b1, __b2;				\
+	__get_user_asm_byte(__b1, __gu_addr, err);		\
+	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
+	(x) = __b1 | (__b2 << 8);				\
+})
+#else
+#define __get_user_asm_half(x,__gu_addr,err)			\
+({								\
+	unsigned long __b1, __b2;				\
+	__get_user_asm_byte(__b1, __gu_addr, err);		\
+	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
+	(x) = (__b1 << 8) | __b2;				\
+})
+#endif
+
+#define __get_user_asm_word(x,addr,err)				\
+	__asm__ __volatile__(					\
+	"1:	ldrt	%1,[%2],#0\n"				\
+	"2:\n"							\
+	"	.section .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
+	"3:	mov	%0, %3\n"				\
+	"	mov	%1, #0\n"				\
+	"	b	2b\n"					\
+	"	.previous\n"					\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 3b\n"				\
+	"	.previous"					\
+	: "+r" (err), "=&r" (x)					\
+	: "r" (addr), "i" (-EFAULT)				\
+	: "cc")
+
+#define __put_user(x,ptr)						\
+({									\
+	long __pu_err = 0;						\
+	__put_user_err((x),(ptr),__pu_err);				\
+	__pu_err;							\
+})
+
+#define __put_user_error(x,ptr,err)					\
+({									\
+	__put_user_err((x),(ptr),err);					\
+	(void) 0;							\
+})
+
+#define __put_user_err(x,ptr,err)					\
+do {									\
+	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	__typeof__(*(ptr)) __pu_val = (x);				\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
+	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
+	case 4: __put_user_asm_word(__pu_val,__pu_addr,err);	break;	\
+	case 8:	__put_user_asm_dword(__pu_val,__pu_addr,err);	break;	\
+	default: __put_user_bad();					\
+	}								\
+} while (0)
+
+#define __put_user_asm_byte(x,__pu_addr,err)			\
+	__asm__ __volatile__(					\
+	"1:	strbt	%1,[%2],#0\n"				\
+	"2:\n"							\
+	"	.section .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
+	"3:	mov	%0, %3\n"				\
+	"	b	2b\n"					\
+	"	.previous\n"					\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 3b\n"				\
+	"	.previous"					\
+	: "+r" (err)						\
+	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
+	: "cc")
+
+#ifndef __ARMEB__
+#define __put_user_asm_half(x,__pu_addr,err)			\
+({								\
+	unsigned long __temp = (unsigned long)(x);		\
+	__put_user_asm_byte(__temp, __pu_addr, err);		\
+	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
+})
+#else
+#define __put_user_asm_half(x,__pu_addr,err)			\
+({								\
+	unsigned long __temp = (unsigned long)(x);		\
+	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
+	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
+})
+#endif
+
+#define __put_user_asm_word(x,__pu_addr,err)			\
+	__asm__ __volatile__(					\
+	"1:	strt	%1,[%2],#0\n"				\
+	"2:\n"							\
+	"	.section .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
+	"3:	mov	%0, %3\n"				\
+	"	b	2b\n"					\
+	"	.previous\n"					\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 3b\n"				\
+	"	.previous"					\
+	: "+r" (err)						\
+	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
+	: "cc")
+
+#ifndef __ARMEB__
+#define	__reg_oper0	"%R2"
+#define	__reg_oper1	"%Q2"
+#else
+#define	__reg_oper0	"%Q2"
+#define	__reg_oper1	"%R2"
+#endif
+
+#define __put_user_asm_dword(x,__pu_addr,err)			\
+	__asm__ __volatile__(					\
+	"1:	strt	" __reg_oper1 ", [%1], #4\n"		\
+	"2:	strt	" __reg_oper0 ", [%1], #0\n"		\
+	"3:\n"							\
+	"	.section .fixup,\"ax\"\n"			\
+	"	.align	2\n"					\
+	"4:	mov	%0, %3\n"				\
+	"	b	3b\n"					\
+	"	.previous\n"					\
+	"	.section __ex_table,\"a\"\n"			\
+	"	.align	3\n"					\
+	"	.long	1b, 4b\n"				\
+	"	.long	2b, 4b\n"				\
+	"	.previous"					\
+	: "+r" (err), "+r" (__pu_addr)				\
+	: "r" (x), "i" (-EFAULT)				\
+	: "cc")
+
+
+#ifdef CONFIG_MMU
+extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
+#endif
+
+extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
+extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
+
+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (access_ok(VERIFY_READ, from, n))
+		n = __copy_from_user(to, from, n);
+	else /* security hole - plug it */
+		memzero(to, n);
+	return n;
+}
+
+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		n = __copy_to_user(to, from, n);
+	return n;
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		n = __clear_user(to, n);
+	return n;
+}
+
+static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	long res = -EFAULT;
+	if (access_ok(VERIFY_READ, src, 1))
+		res = __strncpy_from_user(dst, src, count);
+	return res;
+}
+
+#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
+
+static inline long __must_check strnlen_user(const char __user *s, long n)
+{
+	unsigned long res = 0;
+
+	if (__addr_ok(s))
+		res = __strnlen_user(s, n);
+
+	return res;
+}
+
+#endif /* _ASMARM_UACCESS_H */
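
Taken together, get_user()/put_user() check the pointer against the current segment limit and branch to the out-of-line __get_user_N/__put_user_N helpers, while copy_from_user() performs the access_ok() check before the bulk copier and zero-fills the destination when the source is inaccessible. A rough sketch of typical driver usage follows; the exdev structure and handlers are hypothetical, shown only to illustrate the calling conventions.

#include <linux/fs.h>
#include <linux/uaccess.h>

struct exdev {
	u32	ctrl;
	char	buf[64];
};

static ssize_t exdev_write(struct file *file, const char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	struct exdev *dev = file->private_data;

	if (count > sizeof(dev->buf))
		count = sizeof(dev->buf);

	/* bulk copy: returns the number of bytes that could NOT be copied */
	if (copy_from_user(dev->buf, ubuf, count))
		return -EFAULT;

	return count;
}

static int exdev_get_ctrl(struct exdev *dev, u32 __user *argp)
{
	/* single value: returns 0 on success, -EFAULT on a bad address */
	return put_user(dev->ctrl, argp);
}
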
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
new file mode 100644
index 0000000..bf65e9f
--- /dev/null
+++ b/arch/arm/include/asm/ucontext.h
@@ -0,0 +1,103 @@
+#ifndef _ASMARM_UCONTEXT_H
+#define _ASMARM_UCONTEXT_H
+
+#include <asm/fpstate.h>
+
+/*
+ * struct sigcontext only has room for the basic registers, but struct
+ * ucontext now has room for all registers which need to be saved and
+ * restored.  Coprocessor registers are stored in uc_regspace.  Each
+ * coprocessor's saved state should start with a documented 32-bit magic
+ * number, followed by a 32-bit word giving the coprocessor's saved size.
+ * uc_regspace may be expanded if necessary, although this takes some
+ * coordination with glibc.
+ */
+
+struct ucontext {
+	unsigned long	  uc_flags;
+	struct ucontext  *uc_link;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;
+	/* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+	int		  __unused[32 - (sizeof (sigset_t) / sizeof (int))];
+	/* Last for extensibility.  Eight byte aligned because some
+	   coprocessors require eight byte alignment.  */
+ 	unsigned long	  uc_regspace[128] __attribute__((__aligned__(8)));
+};
+
+#ifdef __KERNEL__
+
+/*
+ * Coprocessor save state.  The magic values and specific
+ * coprocessor's layouts are part of the userspace ABI.  Each one of
+ * these should be a multiple of eight bytes and aligned to eight
+ * bytes, to prevent unpredictable padding in the signal frame.
+ */
+
+#ifdef CONFIG_CRUNCH
+#define CRUNCH_MAGIC		0x5065cf03
+#define CRUNCH_STORAGE_SIZE	(CRUNCH_SIZE + 8)
+
+struct crunch_sigframe {
+	unsigned long	magic;
+	unsigned long	size;
+	struct crunch_state	storage;
+} __attribute__((__aligned__(8)));
+#endif
+
+#ifdef CONFIG_IWMMXT
+/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
+#define IWMMXT_MAGIC		0x12ef842a
+#define IWMMXT_STORAGE_SIZE	(IWMMXT_SIZE + 8)
+
+struct iwmmxt_sigframe {
+	unsigned long	magic;
+	unsigned long	size;
+	struct iwmmxt_struct storage;
+} __attribute__((__aligned__(8)));
+#endif /* CONFIG_IWMMXT */
+
+#ifdef CONFIG_VFP
+#if __LINUX_ARM_ARCH__ < 6
+/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
+ * word after the registers, and a word of padding at the end for
+ * alignment.  */
+#define VFP_MAGIC		0x56465001
+#define VFP_STORAGE_SIZE	152
+#else
+#define VFP_MAGIC		0x56465002
+#define VFP_STORAGE_SIZE	144
+#endif
+
+struct vfp_sigframe
+{
+	unsigned long		magic;
+	unsigned long		size;
+	union vfp_state		storage;
+};
+#endif /* CONFIG_VFP */
+
+/*
+ * Auxiliary signal frame.  This saves stuff like FP state.
+ * The layout of this structure is not part of the user ABI,
+ * because the config options aren't.  uc_regspace is really
+ * one of these.
+ */
+struct aux_sigframe {
+#ifdef CONFIG_CRUNCH
+	struct crunch_sigframe	crunch;
+#endif
+#ifdef CONFIG_IWMMXT
+	struct iwmmxt_sigframe	iwmmxt;
+#endif
+#if 0 && defined CONFIG_VFP /* Not yet saved.  */
+	struct vfp_sigframe	vfp;
+#endif
+	/* Something that isn't a valid magic number for any coprocessor.  */
+	unsigned long		end_magic;
+} __attribute__((__aligned__(8)));
+
+#endif
+
+#endif /* !_ASMARM_UCONTEXT_H */
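
The uc_regspace framing described above (32-bit magic, 32-bit size in bytes, then the coprocessor state, all eight-byte aligned) lets user space walk the area without knowing which coprocessors the kernel was built with. The sketch below is a hypothetical user-space walker; it assumes the area ends at a word that matches no known magic (the end_magic convention noted in the header) and uses the pre-ARMv6 VFP magic value purely as an example.

#include <stdio.h>

#define EX_VFP_MAGIC	0x56465001	/* pre-ARMv6 value from this header */
#define EX_IWMMXT_MAGIC	0x12ef842a

/* regspace points at ucontext.uc_regspace, which holds 128 words */
static void walk_regspace(const unsigned long *regspace)
{
	const unsigned long *p = regspace;
	const unsigned long *end = regspace + 128;

	while (p + 2 <= end) {
		unsigned long magic = p[0];
		unsigned long size = p[1];	/* bytes, including this 8-byte header */

		if (magic != EX_VFP_MAGIC && magic != EX_IWMMXT_MAGIC)
			break;		/* unrecognised word: treat as end_magic */
		printf("coprocessor block %08lx, %lu bytes\n", magic, size);
		if (size < 8 || size % 8)
			break;		/* malformed block; stop rather than loop */
		p += size / sizeof(unsigned long);
	}
}
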
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 0000000..44593a8
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_ARM_UNALIGNED_H
+#define _ASM_ARM_UNALIGNED_H
+
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+/*
+ * Select endianness
+ */
+#ifndef __ARMEB__
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+#else
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
+#endif
+
+#endif /* _ASM_ARM_UNALIGNED_H */
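
In practice get_unaligned()/put_unaligned() are used wherever a driver or protocol parser must access a multi-byte value in a buffer whose alignment it cannot guarantee; the byteshift implementations avoid the alignment trap an ordinary word load would risk on ARM. A small illustrative helper, with made-up field offsets:

#include <linux/types.h>
#include <asm/unaligned.h>

/*
 * Hypothetical packet header: a 16-bit type at offset 1 and a 32-bit
 * length at offset 3, so neither field is naturally aligned.
 */
static u32 example_parse_len(const u8 *pkt)
{
	u16 type = get_unaligned((const u16 *)(pkt + 1));
	u32 len  = get_unaligned((const u32 *)(pkt + 3));

	return type ? len : 0;
}

static void example_set_len(u8 *pkt, u32 len)
{
	put_unaligned(len, (u32 *)(pkt + 3));
}
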
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
new file mode 100644
index 0000000..f95fbb2
--- /dev/null
+++ b/arch/arm/include/asm/unistd.h
@@ -0,0 +1,450 @@
+/*
+ *  arch/arm/include/asm/unistd.h
+ *
+ *  Copyright (C) 2001-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Please forward _all_ changes to this file to rmk@arm.linux.org.uk,
+ * no matter what the change is.  Thanks!
+ */
+#ifndef __ASM_ARM_UNISTD_H
+#define __ASM_ARM_UNISTD_H
+
+#define __NR_OABI_SYSCALL_BASE	0x900000
+
+#if defined(__thumb__) || defined(__ARM_EABI__)
+#define __NR_SYSCALL_BASE	0
+#else
+#define __NR_SYSCALL_BASE	__NR_OABI_SYSCALL_BASE
+#endif
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall		(__NR_SYSCALL_BASE+  0)
+#define __NR_exit			(__NR_SYSCALL_BASE+  1)
+#define __NR_fork			(__NR_SYSCALL_BASE+  2)
+#define __NR_read			(__NR_SYSCALL_BASE+  3)
+#define __NR_write			(__NR_SYSCALL_BASE+  4)
+#define __NR_open			(__NR_SYSCALL_BASE+  5)
+#define __NR_close			(__NR_SYSCALL_BASE+  6)
+					/* 7 was sys_waitpid */
+#define __NR_creat			(__NR_SYSCALL_BASE+  8)
+#define __NR_link			(__NR_SYSCALL_BASE+  9)
+#define __NR_unlink			(__NR_SYSCALL_BASE+ 10)
+#define __NR_execve			(__NR_SYSCALL_BASE+ 11)
+#define __NR_chdir			(__NR_SYSCALL_BASE+ 12)
+#define __NR_time			(__NR_SYSCALL_BASE+ 13)
+#define __NR_mknod			(__NR_SYSCALL_BASE+ 14)
+#define __NR_chmod			(__NR_SYSCALL_BASE+ 15)
+#define __NR_lchown			(__NR_SYSCALL_BASE+ 16)
+					/* 17 was sys_break */
+					/* 18 was sys_stat */
+#define __NR_lseek			(__NR_SYSCALL_BASE+ 19)
+#define __NR_getpid			(__NR_SYSCALL_BASE+ 20)
+#define __NR_mount			(__NR_SYSCALL_BASE+ 21)
+#define __NR_umount			(__NR_SYSCALL_BASE+ 22)
+#define __NR_setuid			(__NR_SYSCALL_BASE+ 23)
+#define __NR_getuid			(__NR_SYSCALL_BASE+ 24)
+#define __NR_stime			(__NR_SYSCALL_BASE+ 25)
+#define __NR_ptrace			(__NR_SYSCALL_BASE+ 26)
+#define __NR_alarm			(__NR_SYSCALL_BASE+ 27)
+					/* 28 was sys_fstat */
+#define __NR_pause			(__NR_SYSCALL_BASE+ 29)
+#define __NR_utime			(__NR_SYSCALL_BASE+ 30)
+					/* 31 was sys_stty */
+					/* 32 was sys_gtty */
+#define __NR_access			(__NR_SYSCALL_BASE+ 33)
+#define __NR_nice			(__NR_SYSCALL_BASE+ 34)
+					/* 35 was sys_ftime */
+#define __NR_sync			(__NR_SYSCALL_BASE+ 36)
+#define __NR_kill			(__NR_SYSCALL_BASE+ 37)
+#define __NR_rename			(__NR_SYSCALL_BASE+ 38)
+#define __NR_mkdir			(__NR_SYSCALL_BASE+ 39)
+#define __NR_rmdir			(__NR_SYSCALL_BASE+ 40)
+#define __NR_dup			(__NR_SYSCALL_BASE+ 41)
+#define __NR_pipe			(__NR_SYSCALL_BASE+ 42)
+#define __NR_times			(__NR_SYSCALL_BASE+ 43)
+					/* 44 was sys_prof */
+#define __NR_brk			(__NR_SYSCALL_BASE+ 45)
+#define __NR_setgid			(__NR_SYSCALL_BASE+ 46)
+#define __NR_getgid			(__NR_SYSCALL_BASE+ 47)
+					/* 48 was sys_signal */
+#define __NR_geteuid			(__NR_SYSCALL_BASE+ 49)
+#define __NR_getegid			(__NR_SYSCALL_BASE+ 50)
+#define __NR_acct			(__NR_SYSCALL_BASE+ 51)
+#define __NR_umount2			(__NR_SYSCALL_BASE+ 52)
+					/* 53 was sys_lock */
+#define __NR_ioctl			(__NR_SYSCALL_BASE+ 54)
+#define __NR_fcntl			(__NR_SYSCALL_BASE+ 55)
+					/* 56 was sys_mpx */
+#define __NR_setpgid			(__NR_SYSCALL_BASE+ 57)
+					/* 58 was sys_ulimit */
+					/* 59 was sys_olduname */
+#define __NR_umask			(__NR_SYSCALL_BASE+ 60)
+#define __NR_chroot			(__NR_SYSCALL_BASE+ 61)
+#define __NR_ustat			(__NR_SYSCALL_BASE+ 62)
+#define __NR_dup2			(__NR_SYSCALL_BASE+ 63)
+#define __NR_getppid			(__NR_SYSCALL_BASE+ 64)
+#define __NR_getpgrp			(__NR_SYSCALL_BASE+ 65)
+#define __NR_setsid			(__NR_SYSCALL_BASE+ 66)
+#define __NR_sigaction			(__NR_SYSCALL_BASE+ 67)
+					/* 68 was sys_sgetmask */
+					/* 69 was sys_ssetmask */
+#define __NR_setreuid			(__NR_SYSCALL_BASE+ 70)
+#define __NR_setregid			(__NR_SYSCALL_BASE+ 71)
+#define __NR_sigsuspend			(__NR_SYSCALL_BASE+ 72)
+#define __NR_sigpending			(__NR_SYSCALL_BASE+ 73)
+#define __NR_sethostname		(__NR_SYSCALL_BASE+ 74)
+#define __NR_setrlimit			(__NR_SYSCALL_BASE+ 75)
+#define __NR_getrlimit			(__NR_SYSCALL_BASE+ 76)	/* Back compat 2GB limited rlimit */
+#define __NR_getrusage			(__NR_SYSCALL_BASE+ 77)
+#define __NR_gettimeofday		(__NR_SYSCALL_BASE+ 78)
+#define __NR_settimeofday		(__NR_SYSCALL_BASE+ 79)
+#define __NR_getgroups			(__NR_SYSCALL_BASE+ 80)
+#define __NR_setgroups			(__NR_SYSCALL_BASE+ 81)
+#define __NR_select			(__NR_SYSCALL_BASE+ 82)
+#define __NR_symlink			(__NR_SYSCALL_BASE+ 83)
+					/* 84 was sys_lstat */
+#define __NR_readlink			(__NR_SYSCALL_BASE+ 85)
+#define __NR_uselib			(__NR_SYSCALL_BASE+ 86)
+#define __NR_swapon			(__NR_SYSCALL_BASE+ 87)
+#define __NR_reboot			(__NR_SYSCALL_BASE+ 88)
+#define __NR_readdir			(__NR_SYSCALL_BASE+ 89)
+#define __NR_mmap			(__NR_SYSCALL_BASE+ 90)
+#define __NR_munmap			(__NR_SYSCALL_BASE+ 91)
+#define __NR_truncate			(__NR_SYSCALL_BASE+ 92)
+#define __NR_ftruncate			(__NR_SYSCALL_BASE+ 93)
+#define __NR_fchmod			(__NR_SYSCALL_BASE+ 94)
+#define __NR_fchown			(__NR_SYSCALL_BASE+ 95)
+#define __NR_getpriority		(__NR_SYSCALL_BASE+ 96)
+#define __NR_setpriority		(__NR_SYSCALL_BASE+ 97)
+					/* 98 was sys_profil */
+#define __NR_statfs			(__NR_SYSCALL_BASE+ 99)
+#define __NR_fstatfs			(__NR_SYSCALL_BASE+100)
+					/* 101 was sys_ioperm */
+#define __NR_socketcall			(__NR_SYSCALL_BASE+102)
+#define __NR_syslog			(__NR_SYSCALL_BASE+103)
+#define __NR_setitimer			(__NR_SYSCALL_BASE+104)
+#define __NR_getitimer			(__NR_SYSCALL_BASE+105)
+#define __NR_stat			(__NR_SYSCALL_BASE+106)
+#define __NR_lstat			(__NR_SYSCALL_BASE+107)
+#define __NR_fstat			(__NR_SYSCALL_BASE+108)
+					/* 109 was sys_uname */
+					/* 110 was sys_iopl */
+#define __NR_vhangup			(__NR_SYSCALL_BASE+111)
+					/* 112 was sys_idle */
+#define __NR_syscall			(__NR_SYSCALL_BASE+113) /* syscall to call a syscall! */
+#define __NR_wait4			(__NR_SYSCALL_BASE+114)
+#define __NR_swapoff			(__NR_SYSCALL_BASE+115)
+#define __NR_sysinfo			(__NR_SYSCALL_BASE+116)
+#define __NR_ipc			(__NR_SYSCALL_BASE+117)
+#define __NR_fsync			(__NR_SYSCALL_BASE+118)
+#define __NR_sigreturn			(__NR_SYSCALL_BASE+119)
+#define __NR_clone			(__NR_SYSCALL_BASE+120)
+#define __NR_setdomainname		(__NR_SYSCALL_BASE+121)
+#define __NR_uname			(__NR_SYSCALL_BASE+122)
+					/* 123 was sys_modify_ldt */
+#define __NR_adjtimex			(__NR_SYSCALL_BASE+124)
+#define __NR_mprotect			(__NR_SYSCALL_BASE+125)
+#define __NR_sigprocmask		(__NR_SYSCALL_BASE+126)
+					/* 127 was sys_create_module */
+#define __NR_init_module		(__NR_SYSCALL_BASE+128)
+#define __NR_delete_module		(__NR_SYSCALL_BASE+129)
+					/* 130 was sys_get_kernel_syms */
+#define __NR_quotactl			(__NR_SYSCALL_BASE+131)
+#define __NR_getpgid			(__NR_SYSCALL_BASE+132)
+#define __NR_fchdir			(__NR_SYSCALL_BASE+133)
+#define __NR_bdflush			(__NR_SYSCALL_BASE+134)
+#define __NR_sysfs			(__NR_SYSCALL_BASE+135)
+#define __NR_personality		(__NR_SYSCALL_BASE+136)
+					/* 137 was sys_afs_syscall */
+#define __NR_setfsuid			(__NR_SYSCALL_BASE+138)
+#define __NR_setfsgid			(__NR_SYSCALL_BASE+139)
+#define __NR__llseek			(__NR_SYSCALL_BASE+140)
+#define __NR_getdents			(__NR_SYSCALL_BASE+141)
+#define __NR__newselect			(__NR_SYSCALL_BASE+142)
+#define __NR_flock			(__NR_SYSCALL_BASE+143)
+#define __NR_msync			(__NR_SYSCALL_BASE+144)
+#define __NR_readv			(__NR_SYSCALL_BASE+145)
+#define __NR_writev			(__NR_SYSCALL_BASE+146)
+#define __NR_getsid			(__NR_SYSCALL_BASE+147)
+#define __NR_fdatasync			(__NR_SYSCALL_BASE+148)
+#define __NR__sysctl			(__NR_SYSCALL_BASE+149)
+#define __NR_mlock			(__NR_SYSCALL_BASE+150)
+#define __NR_munlock			(__NR_SYSCALL_BASE+151)
+#define __NR_mlockall			(__NR_SYSCALL_BASE+152)
+#define __NR_munlockall			(__NR_SYSCALL_BASE+153)
+#define __NR_sched_setparam		(__NR_SYSCALL_BASE+154)
+#define __NR_sched_getparam		(__NR_SYSCALL_BASE+155)
+#define __NR_sched_setscheduler		(__NR_SYSCALL_BASE+156)
+#define __NR_sched_getscheduler		(__NR_SYSCALL_BASE+157)
+#define __NR_sched_yield		(__NR_SYSCALL_BASE+158)
+#define __NR_sched_get_priority_max	(__NR_SYSCALL_BASE+159)
+#define __NR_sched_get_priority_min	(__NR_SYSCALL_BASE+160)
+#define __NR_sched_rr_get_interval	(__NR_SYSCALL_BASE+161)
+#define __NR_nanosleep			(__NR_SYSCALL_BASE+162)
+#define __NR_mremap			(__NR_SYSCALL_BASE+163)
+#define __NR_setresuid			(__NR_SYSCALL_BASE+164)
+#define __NR_getresuid			(__NR_SYSCALL_BASE+165)
+					/* 166 was sys_vm86 */
+					/* 167 was sys_query_module */
+#define __NR_poll			(__NR_SYSCALL_BASE+168)
+#define __NR_nfsservctl			(__NR_SYSCALL_BASE+169)
+#define __NR_setresgid			(__NR_SYSCALL_BASE+170)
+#define __NR_getresgid			(__NR_SYSCALL_BASE+171)
+#define __NR_prctl			(__NR_SYSCALL_BASE+172)
+#define __NR_rt_sigreturn		(__NR_SYSCALL_BASE+173)
+#define __NR_rt_sigaction		(__NR_SYSCALL_BASE+174)
+#define __NR_rt_sigprocmask		(__NR_SYSCALL_BASE+175)
+#define __NR_rt_sigpending		(__NR_SYSCALL_BASE+176)
+#define __NR_rt_sigtimedwait		(__NR_SYSCALL_BASE+177)
+#define __NR_rt_sigqueueinfo		(__NR_SYSCALL_BASE+178)
+#define __NR_rt_sigsuspend		(__NR_SYSCALL_BASE+179)
+#define __NR_pread64			(__NR_SYSCALL_BASE+180)
+#define __NR_pwrite64			(__NR_SYSCALL_BASE+181)
+#define __NR_chown			(__NR_SYSCALL_BASE+182)
+#define __NR_getcwd			(__NR_SYSCALL_BASE+183)
+#define __NR_capget			(__NR_SYSCALL_BASE+184)
+#define __NR_capset			(__NR_SYSCALL_BASE+185)
+#define __NR_sigaltstack		(__NR_SYSCALL_BASE+186)
+#define __NR_sendfile			(__NR_SYSCALL_BASE+187)
+					/* 188 reserved */
+					/* 189 reserved */
+#define __NR_vfork			(__NR_SYSCALL_BASE+190)
+#define __NR_ugetrlimit			(__NR_SYSCALL_BASE+191)	/* SuS compliant getrlimit */
+#define __NR_mmap2			(__NR_SYSCALL_BASE+192)
+#define __NR_truncate64			(__NR_SYSCALL_BASE+193)
+#define __NR_ftruncate64		(__NR_SYSCALL_BASE+194)
+#define __NR_stat64			(__NR_SYSCALL_BASE+195)
+#define __NR_lstat64			(__NR_SYSCALL_BASE+196)
+#define __NR_fstat64			(__NR_SYSCALL_BASE+197)
+#define __NR_lchown32			(__NR_SYSCALL_BASE+198)
+#define __NR_getuid32			(__NR_SYSCALL_BASE+199)
+#define __NR_getgid32			(__NR_SYSCALL_BASE+200)
+#define __NR_geteuid32			(__NR_SYSCALL_BASE+201)
+#define __NR_getegid32			(__NR_SYSCALL_BASE+202)
+#define __NR_setreuid32			(__NR_SYSCALL_BASE+203)
+#define __NR_setregid32			(__NR_SYSCALL_BASE+204)
+#define __NR_getgroups32		(__NR_SYSCALL_BASE+205)
+#define __NR_setgroups32		(__NR_SYSCALL_BASE+206)
+#define __NR_fchown32			(__NR_SYSCALL_BASE+207)
+#define __NR_setresuid32		(__NR_SYSCALL_BASE+208)
+#define __NR_getresuid32		(__NR_SYSCALL_BASE+209)
+#define __NR_setresgid32		(__NR_SYSCALL_BASE+210)
+#define __NR_getresgid32		(__NR_SYSCALL_BASE+211)
+#define __NR_chown32			(__NR_SYSCALL_BASE+212)
+#define __NR_setuid32			(__NR_SYSCALL_BASE+213)
+#define __NR_setgid32			(__NR_SYSCALL_BASE+214)
+#define __NR_setfsuid32			(__NR_SYSCALL_BASE+215)
+#define __NR_setfsgid32			(__NR_SYSCALL_BASE+216)
+#define __NR_getdents64			(__NR_SYSCALL_BASE+217)
+#define __NR_pivot_root			(__NR_SYSCALL_BASE+218)
+#define __NR_mincore			(__NR_SYSCALL_BASE+219)
+#define __NR_madvise			(__NR_SYSCALL_BASE+220)
+#define __NR_fcntl64			(__NR_SYSCALL_BASE+221)
+					/* 222 for tux */
+					/* 223 is unused */
+#define __NR_gettid			(__NR_SYSCALL_BASE+224)
+#define __NR_readahead			(__NR_SYSCALL_BASE+225)
+#define __NR_setxattr			(__NR_SYSCALL_BASE+226)
+#define __NR_lsetxattr			(__NR_SYSCALL_BASE+227)
+#define __NR_fsetxattr			(__NR_SYSCALL_BASE+228)
+#define __NR_getxattr			(__NR_SYSCALL_BASE+229)
+#define __NR_lgetxattr			(__NR_SYSCALL_BASE+230)
+#define __NR_fgetxattr			(__NR_SYSCALL_BASE+231)
+#define __NR_listxattr			(__NR_SYSCALL_BASE+232)
+#define __NR_llistxattr			(__NR_SYSCALL_BASE+233)
+#define __NR_flistxattr			(__NR_SYSCALL_BASE+234)
+#define __NR_removexattr		(__NR_SYSCALL_BASE+235)
+#define __NR_lremovexattr		(__NR_SYSCALL_BASE+236)
+#define __NR_fremovexattr		(__NR_SYSCALL_BASE+237)
+#define __NR_tkill			(__NR_SYSCALL_BASE+238)
+#define __NR_sendfile64			(__NR_SYSCALL_BASE+239)
+#define __NR_futex			(__NR_SYSCALL_BASE+240)
+#define __NR_sched_setaffinity		(__NR_SYSCALL_BASE+241)
+#define __NR_sched_getaffinity		(__NR_SYSCALL_BASE+242)
+#define __NR_io_setup			(__NR_SYSCALL_BASE+243)
+#define __NR_io_destroy			(__NR_SYSCALL_BASE+244)
+#define __NR_io_getevents		(__NR_SYSCALL_BASE+245)
+#define __NR_io_submit			(__NR_SYSCALL_BASE+246)
+#define __NR_io_cancel			(__NR_SYSCALL_BASE+247)
+#define __NR_exit_group			(__NR_SYSCALL_BASE+248)
+#define __NR_lookup_dcookie		(__NR_SYSCALL_BASE+249)
+#define __NR_epoll_create		(__NR_SYSCALL_BASE+250)
+#define __NR_epoll_ctl			(__NR_SYSCALL_BASE+251)
+#define __NR_epoll_wait			(__NR_SYSCALL_BASE+252)
+#define __NR_remap_file_pages		(__NR_SYSCALL_BASE+253)
+					/* 254 for set_thread_area */
+					/* 255 for get_thread_area */
+#define __NR_set_tid_address		(__NR_SYSCALL_BASE+256)
+#define __NR_timer_create		(__NR_SYSCALL_BASE+257)
+#define __NR_timer_settime		(__NR_SYSCALL_BASE+258)
+#define __NR_timer_gettime		(__NR_SYSCALL_BASE+259)
+#define __NR_timer_getoverrun		(__NR_SYSCALL_BASE+260)
+#define __NR_timer_delete		(__NR_SYSCALL_BASE+261)
+#define __NR_clock_settime		(__NR_SYSCALL_BASE+262)
+#define __NR_clock_gettime		(__NR_SYSCALL_BASE+263)
+#define __NR_clock_getres		(__NR_SYSCALL_BASE+264)
+#define __NR_clock_nanosleep		(__NR_SYSCALL_BASE+265)
+#define __NR_statfs64			(__NR_SYSCALL_BASE+266)
+#define __NR_fstatfs64			(__NR_SYSCALL_BASE+267)
+#define __NR_tgkill			(__NR_SYSCALL_BASE+268)
+#define __NR_utimes			(__NR_SYSCALL_BASE+269)
+#define __NR_arm_fadvise64_64		(__NR_SYSCALL_BASE+270)
+#define __NR_pciconfig_iobase		(__NR_SYSCALL_BASE+271)
+#define __NR_pciconfig_read		(__NR_SYSCALL_BASE+272)
+#define __NR_pciconfig_write		(__NR_SYSCALL_BASE+273)
+#define __NR_mq_open			(__NR_SYSCALL_BASE+274)
+#define __NR_mq_unlink			(__NR_SYSCALL_BASE+275)
+#define __NR_mq_timedsend		(__NR_SYSCALL_BASE+276)
+#define __NR_mq_timedreceive		(__NR_SYSCALL_BASE+277)
+#define __NR_mq_notify			(__NR_SYSCALL_BASE+278)
+#define __NR_mq_getsetattr		(__NR_SYSCALL_BASE+279)
+#define __NR_waitid			(__NR_SYSCALL_BASE+280)
+#define __NR_socket			(__NR_SYSCALL_BASE+281)
+#define __NR_bind			(__NR_SYSCALL_BASE+282)
+#define __NR_connect			(__NR_SYSCALL_BASE+283)
+#define __NR_listen			(__NR_SYSCALL_BASE+284)
+#define __NR_accept			(__NR_SYSCALL_BASE+285)
+#define __NR_getsockname		(__NR_SYSCALL_BASE+286)
+#define __NR_getpeername		(__NR_SYSCALL_BASE+287)
+#define __NR_socketpair			(__NR_SYSCALL_BASE+288)
+#define __NR_send			(__NR_SYSCALL_BASE+289)
+#define __NR_sendto			(__NR_SYSCALL_BASE+290)
+#define __NR_recv			(__NR_SYSCALL_BASE+291)
+#define __NR_recvfrom			(__NR_SYSCALL_BASE+292)
+#define __NR_shutdown			(__NR_SYSCALL_BASE+293)
+#define __NR_setsockopt			(__NR_SYSCALL_BASE+294)
+#define __NR_getsockopt			(__NR_SYSCALL_BASE+295)
+#define __NR_sendmsg			(__NR_SYSCALL_BASE+296)
+#define __NR_recvmsg			(__NR_SYSCALL_BASE+297)
+#define __NR_semop			(__NR_SYSCALL_BASE+298)
+#define __NR_semget			(__NR_SYSCALL_BASE+299)
+#define __NR_semctl			(__NR_SYSCALL_BASE+300)
+#define __NR_msgsnd			(__NR_SYSCALL_BASE+301)
+#define __NR_msgrcv			(__NR_SYSCALL_BASE+302)
+#define __NR_msgget			(__NR_SYSCALL_BASE+303)
+#define __NR_msgctl			(__NR_SYSCALL_BASE+304)
+#define __NR_shmat			(__NR_SYSCALL_BASE+305)
+#define __NR_shmdt			(__NR_SYSCALL_BASE+306)
+#define __NR_shmget			(__NR_SYSCALL_BASE+307)
+#define __NR_shmctl			(__NR_SYSCALL_BASE+308)
+#define __NR_add_key			(__NR_SYSCALL_BASE+309)
+#define __NR_request_key		(__NR_SYSCALL_BASE+310)
+#define __NR_keyctl			(__NR_SYSCALL_BASE+311)
+#define __NR_semtimedop			(__NR_SYSCALL_BASE+312)
+#define __NR_vserver			(__NR_SYSCALL_BASE+313)
+#define __NR_ioprio_set			(__NR_SYSCALL_BASE+314)
+#define __NR_ioprio_get			(__NR_SYSCALL_BASE+315)
+#define __NR_inotify_init		(__NR_SYSCALL_BASE+316)
+#define __NR_inotify_add_watch		(__NR_SYSCALL_BASE+317)
+#define __NR_inotify_rm_watch		(__NR_SYSCALL_BASE+318)
+#define __NR_mbind			(__NR_SYSCALL_BASE+319)
+#define __NR_get_mempolicy		(__NR_SYSCALL_BASE+320)
+#define __NR_set_mempolicy		(__NR_SYSCALL_BASE+321)
+#define __NR_openat			(__NR_SYSCALL_BASE+322)
+#define __NR_mkdirat			(__NR_SYSCALL_BASE+323)
+#define __NR_mknodat			(__NR_SYSCALL_BASE+324)
+#define __NR_fchownat			(__NR_SYSCALL_BASE+325)
+#define __NR_futimesat			(__NR_SYSCALL_BASE+326)
+#define __NR_fstatat64			(__NR_SYSCALL_BASE+327)
+#define __NR_unlinkat			(__NR_SYSCALL_BASE+328)
+#define __NR_renameat			(__NR_SYSCALL_BASE+329)
+#define __NR_linkat			(__NR_SYSCALL_BASE+330)
+#define __NR_symlinkat			(__NR_SYSCALL_BASE+331)
+#define __NR_readlinkat			(__NR_SYSCALL_BASE+332)
+#define __NR_fchmodat			(__NR_SYSCALL_BASE+333)
+#define __NR_faccessat			(__NR_SYSCALL_BASE+334)
+					/* 335 for pselect6 */
+					/* 336 for ppoll */
+#define __NR_unshare			(__NR_SYSCALL_BASE+337)
+#define __NR_set_robust_list		(__NR_SYSCALL_BASE+338)
+#define __NR_get_robust_list		(__NR_SYSCALL_BASE+339)
+#define __NR_splice			(__NR_SYSCALL_BASE+340)
+#define __NR_arm_sync_file_range	(__NR_SYSCALL_BASE+341)
+#define __NR_sync_file_range2		__NR_arm_sync_file_range
+#define __NR_tee			(__NR_SYSCALL_BASE+342)
+#define __NR_vmsplice			(__NR_SYSCALL_BASE+343)
+#define __NR_move_pages			(__NR_SYSCALL_BASE+344)
+#define __NR_getcpu			(__NR_SYSCALL_BASE+345)
+					/* 346 for epoll_pwait */
+#define __NR_kexec_load			(__NR_SYSCALL_BASE+347)
+#define __NR_utimensat			(__NR_SYSCALL_BASE+348)
+#define __NR_signalfd			(__NR_SYSCALL_BASE+349)
+#define __NR_timerfd_create		(__NR_SYSCALL_BASE+350)
+#define __NR_eventfd			(__NR_SYSCALL_BASE+351)
+#define __NR_fallocate			(__NR_SYSCALL_BASE+352)
+#define __NR_timerfd_settime		(__NR_SYSCALL_BASE+353)
+#define __NR_timerfd_gettime		(__NR_SYSCALL_BASE+354)
+
+/*
+ * The following SWIs are ARM private.
+ */
+#define __ARM_NR_BASE			(__NR_SYSCALL_BASE+0x0f0000)
+#define __ARM_NR_breakpoint		(__ARM_NR_BASE+1)
+#define __ARM_NR_cacheflush		(__ARM_NR_BASE+2)
+#define __ARM_NR_usr26			(__ARM_NR_BASE+3)
+#define __ARM_NR_usr32			(__ARM_NR_BASE+4)
+#define __ARM_NR_set_tls		(__ARM_NR_BASE+5)
+
+/*
+ * The following syscalls are obsolete and no longer available for EABI.
+ */
+#if defined(__ARM_EABI__) && !defined(__KERNEL__)
+#undef __NR_time
+#undef __NR_umount
+#undef __NR_stime
+#undef __NR_alarm
+#undef __NR_utime
+#undef __NR_getrlimit
+#undef __NR_select
+#undef __NR_readdir
+#undef __NR_mmap
+#undef __NR_socketcall
+#undef __NR_syscall
+#undef __NR_ipc
+#endif
+
+#ifdef __KERNEL__
+
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+
+#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_SYS_SOCKETCALL
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+
+/*
+ * Unimplemented (or alternatively implemented) syscalls
+ */
+#define __IGNORE_fadvise64_64		1
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_UNISTD_H */
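
With EABI (__NR_SYSCALL_BASE of 0) the syscall number is passed in r7 and the swi immediate is zero, whereas the old ABI encodes __NR_OABI_SYSCALL_BASE plus the number in the swi instruction itself. Below is a user-space sketch of the EABI convention only; it assumes an EABI toolchain so that __NR_getpid expands to the plain number, and is illustrative rather than a recommended replacement for the C library wrapper.

#include <asm/unistd.h>

/* EABI calling convention: number in r7, arguments in r0-r6, swi #0 */
static inline long eabi_getpid(void)
{
	register long r7 asm("r7") = __NR_getpid;
	register long r0 asm("r0");

	asm volatile("swi	#0"
		     : "=r" (r0)
		     : "r" (r7)
		     : "memory");
	return r0;
}
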
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
new file mode 100644
index 0000000..825c1e7
--- /dev/null
+++ b/arch/arm/include/asm/user.h
@@ -0,0 +1,84 @@
+#ifndef _ARM_USER_H
+#define _ARM_USER_H
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+/* Core file format: The core file is written in such a way that gdb
+   can understand it and provide useful information to the user (under
+   linux we use the 'trad-core' bfd).  There are quite a number of
+   obstacles to being able to view the contents of the floating point
+   registers, and until these are solved you will not be able to view the
+   contents of them.  Actually, you can read in the core file and look at
+   the contents of the user struct to find out what the floating point
+   registers contain.
+   The actual file contents are as follows:
+   UPAGE: 1 page consisting of a user struct that tells gdb what is present
+   in the file.  Directly after this is a copy of the task_struct, which
+   is currently not used by gdb, but it may come in useful at some point.
+   All of the registers are stored as part of the upage.  The upage should
+   always be only one page.
+   DATA: The data area is stored.  We use current->end_text to
+   current->brk to pick up all of the user variables, plus any memory
+   that may have been malloced.  No attempt is made to determine if a page
+   is demand-zero or if a page is totally unused, we just cover the entire
+   range.  All of the addresses are rounded in such a way that an integral
+   number of pages is written.
+   STACK: We need the stack information in order to get a meaningful
+   backtrace.  We need to write the data from (esp) to
+   current->start_stack, so we round each of these off in order to be able
+   to write an integer number of pages.
+   The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+struct user_fp {
+	struct fp_reg {
+		unsigned int sign1:1;
+		unsigned int unused:15;
+		unsigned int sign2:1;
+		unsigned int exponent:14;
+		unsigned int j:1;
+		unsigned int mantissa1:31;
+		unsigned int mantissa0:32;
+	} fpregs[8];
+	unsigned int fpsr:32;
+	unsigned int fpcr:32;
+	unsigned char ftype[8];
+	unsigned int init_flag;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+   this will be used by gdb to figure out where the data and stack segments
+   are within the file, and what virtual addresses to use. */
+struct user{
+/* We start with the registers, to mimic the way that "memory" is returned
+   from the ptrace(3,...) function.  */
+  struct pt_regs regs;		/* Where the registers are actually stored */
+/* ptrace does not yet supply these.  Someday.... */
+  int u_fpvalid;		/* True if math co-processor being used. */
+                                /* for this mess. Not yet used. */
+/* The rest of this junk is to help gdb figure out what goes where */
+  unsigned long int u_tsize;	/* Text segment size (pages). */
+  unsigned long int u_dsize;	/* Data segment size (pages). */
+  unsigned long int u_ssize;	/* Stack segment size (pages). */
+  unsigned long start_code;     /* Starting virtual address of text. */
+  unsigned long start_stack;	/* Starting virtual address of stack area.
+				   This is actually the bottom of the stack,
+				   the top of the stack is always found in the
+				   esp register.  */
+  long int signal;     		/* Signal that caused the core dump. */
+  int reserved;			/* No longer used */
+  unsigned long u_ar0;		/* Used by gdb to help find the values for */
+				/* the registers. */
+  unsigned long magic;		/* To uniquely identify a core file */
+  char u_comm[32];		/* User command that was responsible */
+  int u_debugreg[8];
+  struct user_fp u_fp;		/* FP state */
+  struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
+  				/* the FP registers. */
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ARM_USER_H */
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
new file mode 100644
index 0000000..f4ab34f
--- /dev/null
+++ b/arch/arm/include/asm/vfp.h
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/vfp.h
+ *
+ * VFP register definitions.
+ * First, the standard VFP set.
+ */
+
+#define FPSID			cr0
+#define FPSCR			cr1
+#define MVFR1			cr6
+#define MVFR0			cr7
+#define FPEXC			cr8
+#define FPINST			cr9
+#define FPINST2			cr10
+
+/* FPSID bits */
+#define FPSID_IMPLEMENTER_BIT	(24)
+#define FPSID_IMPLEMENTER_MASK	(0xff << FPSID_IMPLEMENTER_BIT)
+#define FPSID_SOFTWARE		(1<<23)
+#define FPSID_FORMAT_BIT	(21)
+#define FPSID_FORMAT_MASK	(0x3  << FPSID_FORMAT_BIT)
+#define FPSID_NODOUBLE		(1<<20)
+#define FPSID_ARCH_BIT		(16)
+#define FPSID_ARCH_MASK		(0xF  << FPSID_ARCH_BIT)
+#define FPSID_PART_BIT		(8)
+#define FPSID_PART_MASK		(0xFF << FPSID_PART_BIT)
+#define FPSID_VARIANT_BIT	(4)
+#define FPSID_VARIANT_MASK	(0xF  << FPSID_VARIANT_BIT)
+#define FPSID_REV_BIT		(0)
+#define FPSID_REV_MASK		(0xF  << FPSID_REV_BIT)
+
+/* FPEXC bits */
+#define FPEXC_EX		(1 << 31)
+#define FPEXC_EN		(1 << 30)
+#define FPEXC_DEX		(1 << 29)
+#define FPEXC_FP2V		(1 << 28)
+#define FPEXC_VV		(1 << 27)
+#define FPEXC_TFV		(1 << 26)
+#define FPEXC_LENGTH_BIT	(8)
+#define FPEXC_LENGTH_MASK	(7 << FPEXC_LENGTH_BIT)
+#define FPEXC_IDF		(1 << 7)
+#define FPEXC_IXF		(1 << 4)
+#define FPEXC_UFF		(1 << 3)
+#define FPEXC_OFF		(1 << 2)
+#define FPEXC_DZF		(1 << 1)
+#define FPEXC_IOF		(1 << 0)
+#define FPEXC_TRAP_MASK		(FPEXC_IDF|FPEXC_IXF|FPEXC_UFF|FPEXC_OFF|FPEXC_DZF|FPEXC_IOF)
+
+/* FPSCR bits */
+#define FPSCR_DEFAULT_NAN	(1<<25)
+#define FPSCR_FLUSHTOZERO	(1<<24)
+#define FPSCR_ROUND_NEAREST	(0<<22)
+#define FPSCR_ROUND_PLUSINF	(1<<22)
+#define FPSCR_ROUND_MINUSINF	(2<<22)
+#define FPSCR_ROUND_TOZERO	(3<<22)
+#define FPSCR_RMODE_BIT		(22)
+#define FPSCR_RMODE_MASK	(3 << FPSCR_RMODE_BIT)
+#define FPSCR_STRIDE_BIT	(20)
+#define FPSCR_STRIDE_MASK	(3 << FPSCR_STRIDE_BIT)
+#define FPSCR_LENGTH_BIT	(16)
+#define FPSCR_LENGTH_MASK	(7 << FPSCR_LENGTH_BIT)
+#define FPSCR_IOE		(1<<8)
+#define FPSCR_DZE		(1<<9)
+#define FPSCR_OFE		(1<<10)
+#define FPSCR_UFE		(1<<11)
+#define FPSCR_IXE		(1<<12)
+#define FPSCR_IDE		(1<<15)
+#define FPSCR_IOC		(1<<0)
+#define FPSCR_DZC		(1<<1)
+#define FPSCR_OFC		(1<<2)
+#define FPSCR_UFC		(1<<3)
+#define FPSCR_IXC		(1<<4)
+#define FPSCR_IDC		(1<<7)
+
+/* MVFR0 bits */
+#define MVFR0_A_SIMD_BIT	(0)
+#define MVFR0_A_SIMD_MASK	(0xf << MVFR0_A_SIMD_BIT)
+
+/* Bit patterns for decoding the packaged operation descriptors */
+#define VFPOPDESC_LENGTH_BIT	(9)
+#define VFPOPDESC_LENGTH_MASK	(0x07 << VFPOPDESC_LENGTH_BIT)
+#define VFPOPDESC_UNUSED_BIT	(24)
+#define VFPOPDESC_UNUSED_MASK	(0xFF << VFPOPDESC_UNUSED_BIT)
+#define VFPOPDESC_OPDESC_MASK	(~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
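
The FPSID field masks above make it straightforward to report what VFP hardware is present once the register value has been obtained (reading FPSID itself needs an FMRX, which the VFP support code does elsewhere). An illustrative decode, assuming fpsid already holds the register contents:

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/vfp.h>

static void example_report_vfp(u32 fpsid)
{
	unsigned int impl    = (fpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT;
	unsigned int arch    = (fpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
	unsigned int part    = (fpsid & FPSID_PART_MASK) >> FPSID_PART_BIT;
	unsigned int variant = (fpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT;
	unsigned int rev     = (fpsid & FPSID_REV_MASK) >> FPSID_REV_BIT;

	if (fpsid & FPSID_SOFTWARE) {
		printk(KERN_INFO "VFP: software emulation only\n");
		return;
	}

	printk(KERN_INFO "VFP: implementer %02x architecture %d part %02x variant %x rev %x\n",
	       impl, arch, part, variant, rev);
}
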
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
new file mode 100644
index 0000000..422f3cc
--- /dev/null
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/include/asm/vfpmacros.h
+ *
+ * Assembler-only file containing VFP macros and register definitions.
+ */
+#include "vfp.h"
+
+@ Macros to allow building with old toolkits (with no VFP support)
+	.macro	VFPFMRX, rd, sysreg, cond
+	MRC\cond	p10, 7, \rd, \sysreg, cr0, 0	@ FMRX	\rd, \sysreg
+	.endm
+
+	.macro	VFPFMXR, sysreg, rd, cond
+	MCR\cond	p10, 7, \rd, \sysreg, cr0, 0	@ FMXR	\sysreg, \rd
+	.endm
+
+	@ read all the working registers back into the VFP
+	.macro	VFPFLDMIA, base, tmp
+#if __LINUX_ARM_ARCH__ < 6
+	LDC	p11, cr0, [\base],#33*4		    @ FLDMIAX \base!, {d0-d15}
+#else
+	LDC	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d0-d15}
+#endif
+#ifdef CONFIG_VFPv3
+	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
+	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
+	cmp	\tmp, #2			    @ 32 x 64bit registers?
+	ldceql	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#endif
+	.endm
+
+	@ write all the working registers out of the VFP
+	.macro	VFPFSTMIA, base, tmp
+#if __LINUX_ARM_ARCH__ < 6
+	STC	p11, cr0, [\base],#33*4		    @ FSTMIAX \base!, {d0-d15}
+#else
+	STC	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d0-d15}
+#endif
+#ifdef CONFIG_VFPv3
+	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
+	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
+	cmp	\tmp, #2			    @ 32 x 64bit registers?
+	stceql	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#endif
+	.endm
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
new file mode 100644
index 0000000..1e0b913
--- /dev/null
+++ b/arch/arm/include/asm/vga.h
@@ -0,0 +1,12 @@
+#ifndef ASMARM_VGA_H
+#define ASMARM_VGA_H
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+
+#define VGA_MAP_MEM(x,s)	(PCIMEM_BASE + (x))
+
+#define vga_readb(x)	(*((volatile unsigned char *)x))
+#define vga_writeb(x,y)	(*((volatile unsigned char *)y) = (x))
+
+#endif
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
new file mode 100644
index 0000000..7604673
--- /dev/null
+++ b/arch/arm/include/asm/xor.h
@@ -0,0 +1,141 @@
+/*
+ *  arch/arm/include/asm/xor.h
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <asm-generic/xor.h>
+
+#define __XOR(a1, a2) a1 ^= a2
+
+#define GET_BLOCK_2(dst) \
+	__asm__("ldmia	%0, {%1, %2}" \
+		: "=r" (dst), "=r" (a1), "=r" (a2) \
+		: "0" (dst))
+
+#define GET_BLOCK_4(dst) \
+	__asm__("ldmia	%0, {%1, %2, %3, %4}" \
+		: "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \
+		: "0" (dst))
+
+#define XOR_BLOCK_2(src) \
+	__asm__("ldmia	%0!, {%1, %2}" \
+		: "=r" (src), "=r" (b1), "=r" (b2) \
+		: "0" (src)); \
+	__XOR(a1, b1); __XOR(a2, b2);
+
+#define XOR_BLOCK_4(src) \
+	__asm__("ldmia	%0!, {%1, %2, %3, %4}" \
+		: "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
+		: "0" (src)); \
+	__XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
+
+#define PUT_BLOCK_2(dst) \
+	__asm__ __volatile__("stmia	%0!, {%2, %3}" \
+		: "=r" (dst) \
+		: "0" (dst), "r" (a1), "r" (a2))
+
+#define PUT_BLOCK_4(dst) \
+	__asm__ __volatile__("stmia	%0!, {%2, %3, %4, %5}" \
+		: "=r" (dst) \
+		: "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
+
+static void
+xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	unsigned int lines = bytes / sizeof(unsigned long) / 4;
+	register unsigned int a1 __asm__("r4");
+	register unsigned int a2 __asm__("r5");
+	register unsigned int a3 __asm__("r6");
+	register unsigned int a4 __asm__("r7");
+	register unsigned int b1 __asm__("r8");
+	register unsigned int b2 __asm__("r9");
+	register unsigned int b3 __asm__("ip");
+	register unsigned int b4 __asm__("lr");
+
+	do {
+		GET_BLOCK_4(p1);
+		XOR_BLOCK_4(p2);
+		PUT_BLOCK_4(p1);
+	} while (--lines);
+}
+
+static void
+xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3)
+{
+	unsigned int lines = bytes / sizeof(unsigned long) / 4;
+	register unsigned int a1 __asm__("r4");
+	register unsigned int a2 __asm__("r5");
+	register unsigned int a3 __asm__("r6");
+	register unsigned int a4 __asm__("r7");
+	register unsigned int b1 __asm__("r8");
+	register unsigned int b2 __asm__("r9");
+	register unsigned int b3 __asm__("ip");
+	register unsigned int b4 __asm__("lr");
+
+	do {
+		GET_BLOCK_4(p1);
+		XOR_BLOCK_4(p2);
+		XOR_BLOCK_4(p3);
+		PUT_BLOCK_4(p1);
+	} while (--lines);
+}
+
+static void
+xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3, unsigned long *p4)
+{
+	unsigned int lines = bytes / sizeof(unsigned long) / 2;
+	register unsigned int a1 __asm__("r8");
+	register unsigned int a2 __asm__("r9");
+	register unsigned int b1 __asm__("ip");
+	register unsigned int b2 __asm__("lr");
+
+	do {
+		GET_BLOCK_2(p1);
+		XOR_BLOCK_2(p2);
+		XOR_BLOCK_2(p3);
+		XOR_BLOCK_2(p4);
+		PUT_BLOCK_2(p1);
+	} while (--lines);
+}
+
+static void
+xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	unsigned int lines = bytes / sizeof(unsigned long) / 2;
+	register unsigned int a1 __asm__("r8");
+	register unsigned int a2 __asm__("r9");
+	register unsigned int b1 __asm__("ip");
+	register unsigned int b2 __asm__("lr");
+
+	do {
+		GET_BLOCK_2(p1);
+		XOR_BLOCK_2(p2);
+		XOR_BLOCK_2(p3);
+		XOR_BLOCK_2(p4);
+		XOR_BLOCK_2(p5);
+		PUT_BLOCK_2(p1);
+	} while (--lines);
+}
+
+static struct xor_block_template xor_block_arm4regs = {
+	.name	= "arm4regs",
+	.do_2	= xor_arm4regs_2,
+	.do_3	= xor_arm4regs_3,
+	.do_4	= xor_arm4regs_4,
+	.do_5	= xor_arm4regs_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES			\
+	do {					\
+		xor_speed(&xor_block_arm4regs);	\
+		xor_speed(&xor_block_8regs);	\
+		xor_speed(&xor_block_32regs);	\
+	} while (0)
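
These templates plug into the generic XOR framework: XOR_TRY_TEMPLATES is expanded by the RAID/crypto calibration code, which times each template at boot and keeps the fastest. The do_N methods can also be read as plain functions; the sketch below shows the calling convention only, assumes it lives in the translation unit that included this header (xor_block_arm4regs is static), and assumes struct xor_block_template comes from <linux/raid/xor.h>. The length must be a multiple of the per-iteration block (16 bytes for the four-register loops above).

#include <linux/raid/xor.h>	/* struct xor_block_template (location assumed) */

static void example_xor_two(unsigned long *dst, unsigned long *src)
{
	/*
	 * dst[i] ^= src[i] for 64 longs (256 bytes).  do_2 takes the
	 * length in bytes followed by the destination and the source.
	 */
	xor_block_arm4regs.do_2(64 * sizeof(unsigned long), dst, src);
}
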
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 7e9c00a..1c3c6ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -181,7 +181,7 @@
 	ldmfd	sp!, {r4 - r7, r9, pc}
 
 /*
- * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
  * more information about the __proc_info and __arch_info structures.
  */
 	.long	__proc_info_begin
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 1dd8ea4f..2034d4d 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -20,7 +20,7 @@
  *		r2, r3 contains the zero-extended value
  *		lr corrupted
  *
- * No other registers must be altered.  (see include/asm-arm/uaccess.h
+ * No other registers must be altered.  (see <asm/uaccess.h>
  * for specific ASM register usage).
  *
  * Note that ADDR_LIMIT is either 0 or 0xc0000000.
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 8620afe..08ec7df 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -20,7 +20,7 @@
  * Outputs:	r0 is the error code
  *		lr corrupted
  *
- * No other registers must be altered.  (see include/asm-arm/uaccess.h
+ * No other registers must be altered.  (see <asm/uaccess.h>
  * for specific ASM register usage).
  *
  * Note that ADDR_LIMIT is either 0 or 0xc0000000
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 303a7ff..b81dbf9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -259,7 +259,7 @@
  * caller shouldn't need to know that small detail.
  *
  * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
+ * mapping.  See <asm/pgtable.h> for more information.
  */
 void __iomem *
 __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index d64f8e6..eda733d 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -231,7 +231,7 @@
 		.align
 
 /*
- * See linux/include/asm-arm/procinfo.h for a definition of this structure.
+ * See <asm/procinfo.h> for a definition of this structure.
  */
 	
 		.section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h
index 4a4d02c..386cbd1 100644
--- a/arch/arm/nwfpe/fpa11.h
+++ b/arch/arm/nwfpe/fpa11.h
@@ -69,7 +69,7 @@
  * This structure is exported to user space.  Do not re-order.
  * Only add new stuff to the end, and do not change the size of
  * any element.  Elements of this structure are used by user
- * space, and must match struct user_fp in include/asm-arm/user.h.
+ * space, and must match struct user_fp in <asm/user.h>.
  * We include the byte offsets below for documentation purposes.
  *
  * The size of this structure and FPREG are checked by fpmodule.c