Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/include/asm-m32r/a.out.h b/include/asm-m32r/a.out.h
new file mode 100644
index 0000000..4619ba5
--- /dev/null
+++ b/include/asm-m32r/a.out.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_M32R_A_OUT_H
+#define _ASM_M32R_A_OUT_H
+
+/* orig : i386 2.4.18 */
+
+struct exec
+{
+  unsigned long a_info;		/* Use macros N_MAGIC, etc for access */
+  unsigned a_text;		/* length of text, in bytes */
+  unsigned a_data;		/* length of data, in bytes */
+  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
+  unsigned a_syms;		/* length of symbol table data in file, in bytes */
+  unsigned a_entry;		/* start address */
+  unsigned a_trsize;		/* length of relocation info for text, in bytes */
+  unsigned a_drsize;		/* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a)	((a).a_trsize)
+#define N_DRSIZE(a)	((a).a_drsize)
+#define N_SYMSIZE(a)	((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP	TASK_SIZE
+
+#endif
+
+#endif /* _ASM_M32R_A_OUT_H */
diff --git a/include/asm-m32r/addrspace.h b/include/asm-m32r/addrspace.h
new file mode 100644
index 0000000..06a83dc
--- /dev/null
+++ b/include/asm-m32r/addrspace.h
@@ -0,0 +1,58 @@
+/* $Id$ */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 by Hiroyuki Kondo
+ *
+ * Definitions for the address spaces of the M32R CPUs.
+ */
+#ifndef __ASM_M32R_ADDRSPACE_H
+#define __ASM_M32R_ADDRSPACE_H
+
+/*
+ * Memory segments (32bit kernel mode addresses)
+ */
+#define KUSEG                   0x00000000
+#define KSEG0                   0x80000000
+#define KSEG1                   0xa0000000
+#define KSEG2                   0xc0000000
+#define KSEG3                   0xe0000000
+
+#define K0BASE  KSEG0
+
+/*
+ * Returns the kernel segment base of a given address
+ */
+#ifndef __ASSEMBLY__
+#define KSEGX(a)                (((unsigned long)(a)) & 0xe0000000)
+#else
+#define KSEGX(a)                ((a) & 0xe0000000)
+#endif
+
+/*
+ * Returns the physical address of a KSEG0/KSEG1 address
+ */
+#ifndef __ASSEMBLY__
+#define PHYSADDR(a)		(((unsigned long)(a)) & 0x1fffffff)
+#else
+#define PHYSADDR(a)		((a) & 0x1fffffff)
+#endif
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#ifndef __ASSEMBLY__
+#define KSEG0ADDR(a)		((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG0))
+#define KSEG1ADDR(a)		((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG1))
+#define KSEG2ADDR(a)		((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG2))
+#define KSEG3ADDR(a)		((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | KSEG3))
+#else
+#define KSEG0ADDR(a)		(((a) & 0x1fffffff) | KSEG0)
+#define KSEG1ADDR(a)		(((a) & 0x1fffffff) | KSEG1)
+#define KSEG2ADDR(a)		(((a) & 0x1fffffff) | KSEG2)
+#define KSEG3ADDR(a)		(((a) & 0x1fffffff) | KSEG3)
+#endif
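+
+/*
+ * For example, with the macros above, KSEG1ADDR(0x80100000) masks off the
+ * segment bits (0x80100000 & 0x1fffffff == 0x00100000) and ORs in KSEG1,
+ * giving 0xa0100000; KSEGX(0xa0100000) then returns KSEG1 again.
+ */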
+
+#endif /* __ASM_M32R_ADDRSPACE_H */
diff --git a/include/asm-m32r/assembler.h b/include/asm-m32r/assembler.h
new file mode 100644
index 0000000..e1dff9d
--- /dev/null
+++ b/include/asm-m32r/assembler.h
@@ -0,0 +1,225 @@
+#ifndef _ASM_M32R_ASSEMBLER_H
+#define _ASM_M32R_ASSEMBLER_H
+
+/*
+ * linux/asm-m32r/assembler.h
+ *
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * This file contains M32R architecture specific macro definitions.
+ */
+
+#include <linux/config.h>
+
+#ifndef __STR
+#ifdef __ASSEMBLY__
+#define __STR(x) x
+#else
+#define __STR(x) #x
+#endif
+#endif /* __STR */
+
+#ifdef CONFIG_SMP
+#define M32R_LOCK	__STR(lock)
+#define M32R_UNLOCK	__STR(unlock)
+#else
+#define M32R_LOCK	__STR(ld)
+#define M32R_UNLOCK	__STR(st)
+#endif
+
+#ifdef __ASSEMBLY__
+#undef ENTRY
+#define ENTRY(name) ENTRY_M name
+	.macro  ENTRY_M name
+	.global \name
+	ALIGN
+\name:
+	.endm
+#endif
+
+
+/**
+ * LDIMM - load immediate value
+ * STI - enable interrupts
+ * CLI - disable interrupts
+ */
+
+#ifdef __ASSEMBLY__
+
+#define LDIMM(reg,x) LDIMM reg x
+	.macro LDIMM reg x
+	seth	\reg, #high(\x)
+	or3	\reg, \reg, #low(\x)
+	.endm
+
+#if !defined(CONFIG_CHIP_M32102)
+#define STI(reg) STI_M reg
+	.macro STI_M reg
+	setpsw  #0x40	    ->	nop
+	; WORKAROUND: "-> nop" is a workaround for the M32700(TS1).
+	.endm
+
+#define CLI(reg) CLI_M reg
+	.macro CLI_M reg
+	clrpsw  #0x40	    ->	nop
+	; WORKAROUND: "-> nop" is a workaround for the M32700(TS1).
+	.endm
+#else	/* CONFIG_CHIP_M32102 */
+#define STI(reg) STI_M reg
+	.macro STI_M reg
+	mvfc	\reg, psw
+	or3	\reg, \reg, #0x0040
+	mvtc	\reg, psw
+	.endm
+
+#define CLI(reg) CLI_M reg
+	.macro CLI_M reg
+	mvfc	\reg, psw
+	and3	\reg, \reg, #0xffbf
+	mvtc	\reg, psw
+	.endm
+#endif	/* CONFIG_CHIP_M32102 */
+
+	.macro	SAVE_ALL
+	push	r0		; orig_r0
+	push	sp		; spi (r15)
+	push	lr		; r14
+	push	r13
+	mvfc	r13, cr3	; spu
+	push	r13
+	mvfc	r13, bbpc
+	push	r13
+	mvfc	r13, bbpsw
+	push	r13
+	mvfc	r13, bpc
+	push	r13
+	mvfc	r13, psw
+	push	r13
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+	mvfaclo	r13, a1
+	push	r13
+	mvfachi r13, a1
+	push	r13
+	mvfaclo	r13, a0
+	push	r13
+	mvfachi	r13, a0
+	push	r13
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+	mvfaclo	r13
+	push	r13
+	mvfachi	r13
+	push	r13
+#else
+#error unknown isa configuration
+#endif
+	ldi	r13, #-1
+	push	r13		; syscall_nr (default: -1)
+	push	r12
+	push	r11
+	push	r10
+	push	r9
+	push	r8
+	push	r7
+	push	r3
+	push	r2
+	push	r1
+	push	r0
+	addi	sp, #-4		; room for implicit pt_regs parameter
+	push	r6
+	push	r5
+	push	r4
+	.endm
+
+	.macro	RESTORE_ALL
+	pop	r4
+	pop	r5
+	pop	r6
+	addi	sp, #4
+	pop	r0
+	pop	r1
+	pop	r2
+	pop	r3
+	pop	r7
+	pop	r8
+	pop	r9
+	pop	r10
+	pop	r11
+	pop	r12
+	addi	r15, #4		; Skip syscall number
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+	pop	r13
+	mvtachi	r13, a0
+	pop	r13
+	mvtaclo	r13, a0
+	pop	r13
+	mvtachi	r13, a1
+	pop	r13
+	mvtaclo	r13, a1
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+	pop	r13
+	mvtachi	r13
+	pop	r13
+	mvtaclo	r13
+#else
+#error unknown isa configuration
+#endif
+	pop	r14
+	mvtc	r14, psw
+	pop	r14
+	mvtc	r14, bpc
+	addi	sp, #8		; Skip bbpsw, bbpc
+	pop	r14
+	mvtc	r14, cr3	; spu
+	pop	r13
+	pop	lr		; r14
+	pop	sp		; spi (r15)
+	addi	sp, #4		; Skip orig_r0
+	.fillinsn
+1:	rte
+	.section .fixup,"ax"
+2:	bl	do_exit
+	.previous
+	.section __ex_table,"a"
+	ALIGN
+	.long	1b, 2b
+	.previous
+	.endm
+
+#define GET_CURRENT(reg)  get_current reg
+	.macro get_current reg
+	ldi  \reg, #-8192
+	and  \reg, sp
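+	; The two instructions above compute sp & ~8191: assuming an 8KB,
+	; 8KB-aligned kernel stack, this is the base of the current stack
+	; (where the thread information lives).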
+	.endm
+
+#if !defined(CONFIG_CHIP_M32102)
+	.macro	SWITCH_TO_KERNEL_STACK
+	; switch to kernel stack (spi)
+	clrpsw	#0x80	    ->	nop
+	.endm
+#else	/* CONFIG_CHIP_M32102 */
+	.macro	SWITCH_TO_KERNEL_STACK
+	push	r0		; save r0 for working
+	mvfc	r0, psw
+	and3	r0, r0, #0x00ff7f
+	mvtc	r0, psw
+	slli	r0, #16
+	bltz	r0, 1f		; check BSM-bit
+;
+	;; called from kernel context: previous stack = spi
+	pop	r0		; retrieve r0
+	bra	2f
+	.fillinsn
+1:
+	;; called from user context: previous stack = spu
+	mvfc	r0, cr3		; spu
+	addi	r0, #4
+	mvtc	r0, cr3		; spu
+	ld	r0, @(-4,r0)	; retrieve r0
+	.fillinsn
+2:
+	.endm
+#endif	/* CONFIG_CHIP_M32102 */
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _ASM_M32R_ASSEMBLER_H */
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
new file mode 100644
index 0000000..bfff69a
--- /dev/null
+++ b/include/asm-m32r/atomic.h
@@ -0,0 +1,295 @@
+#ifndef _ASM_M32R_ATOMIC_H
+#define _ASM_M32R_ATOMIC_H
+
+/*
+ *  linux/include/asm-m32r/atomic.h
+ *
+ *  M32R version:
+ *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
+ *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>
+#include <asm/assembler.h>
+#include <asm/system.h>
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v)	((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable and return it
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns (@i + @v).
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int result;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_add_return		\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"add	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (result)
+		: "r" (&v->counter), "r" (i)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return result;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable and return it
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns (@v - @i).
+ */
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int result;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_sub_return		\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"sub	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (result)
+		: "r" (&v->counter), "r" (i)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return result;
+}
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))
+
+/**
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+
+/**
+ * atomic_inc_return - increment atomic variable and return it
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1 and returns the result.
+ */
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+	unsigned long flags;
+	int result;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_inc_return		\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (result)
+		: "r" (&v->counter)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return result;
+}
+
+/**
+ * atomic_dec_return - decrement atomic variable and return it
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and returns the result.
+ */
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+	unsigned long flags;
+	int result;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_dec_return		\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (result)
+		: "r" (&v->counter)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	return result;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+#define atomic_inc(v) ((void)atomic_inc_return(v))
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+#define atomic_dec(v) ((void)atomic_dec_return(v))
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all
+ * other cases.
+ */
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
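+
+/*
+ * A typical reference-counting usage sketch (refcnt and free_object()
+ * below are illustrative names only, not part of this header):
+ *
+ *	static atomic_t refcnt = ATOMIC_INIT(1);
+ *
+ *	atomic_inc(&refcnt);
+ *	...
+ *	if (atomic_dec_and_test(&refcnt))
+ *		free_object();
+ */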
+
+static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
+{
+	unsigned long flags;
+	unsigned long tmp;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_clear_mask		\n\t"
+		DCACHE_CLEAR("%0", "r5", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"and	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (tmp)
+		: "r" (addr), "r" (~mask)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r5"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+}
+
+static __inline__ void atomic_set_mask(unsigned long  mask, atomic_t *addr)
+{
+	unsigned long flags;
+	unsigned long tmp;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# atomic_set_mask		\n\t"
+		DCACHE_CLEAR("%0", "r5", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"or	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (tmp)
+		: "r" (addr), "r" (mask)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r5"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on m32r */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
new file mode 100644
index 0000000..e784439
--- /dev/null
+++ b/include/asm-m32r/bitops.h
@@ -0,0 +1,702 @@
+#ifndef _ASM_M32R_BITOPS_H
+#define _ASM_M32R_BITOPS_H
+
+/*
+ *  linux/include/asm-m32r/bitops.h
+ *
+ *  Copyright 1992, Linus Torvalds.
+ *
+ *  M32R version:
+ *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
+ *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <asm/assembler.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
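+
+/*
+ * With the numbering above, for instance, set_bit(35, addr) sets bit
+ * 3 (35 & 0x1f) of the second 32-bit word (35 >> 5 == 1) at addr.
+ */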
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+	__u32 mask;
+	volatile __u32 *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r6", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"or	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (tmp)
+		: "r" (a), "r" (mask)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void __set_bit(int nr, volatile void * addr)
+{
+	__u32 mask;
+	volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	*a |= mask;
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+	__u32 mask;
+	volatile __u32 *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r6", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"and	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (tmp)
+		: "r" (a), "r" (~mask)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+}
+
+static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask;
+	volatile unsigned long *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	*a &= ~mask;
+}
+
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+	__u32 mask;
+	volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	*a ^= mask;
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+	__u32  mask;
+	volatile __u32  *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r6", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"xor	%0, %2;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (tmp)
+		: "r" (a), "r" (mask)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "%1", "%2")
+		M32R_LOCK" %0, @%2;		\n\t"
+		"mv	%1, %0;			\n\t"
+		"and	%0, %3;			\n\t"
+		"or	%1, %3;			\n\t"
+		M32R_UNLOCK" %1, @%2;		\n\t"
+		: "=&r" (oldbit), "=&r" (tmp)
+		: "r" (a), "r" (mask)
+		: "memory"
+	);
+	local_irq_restore(flags);
+
+	return (oldbit != 0);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	oldbit = (*a & mask);
+	*a |= mask;
+
+	return (oldbit != 0);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "%1", "%3")
+		M32R_LOCK" %0, @%3;		\n\t"
+		"mv	%1, %0;			\n\t"
+		"and	%0, %2;			\n\t"
+		"not	%2, %2;			\n\t"
+		"and	%1, %2;			\n\t"
+		M32R_UNLOCK" %1, @%3;		\n\t"
+		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
+		: "r" (a)
+		: "memory"
+	);
+	local_irq_restore(flags);
+
+	return (oldbit != 0);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	oldbit = (*a & mask);
+	*a &= ~mask;
+
+	return (oldbit != 0);
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+	oldbit = (*a & mask);
+	*a ^= mask;
+
+	return (oldbit != 0);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+	__u32 mask, oldbit;
+	volatile __u32 *a = addr;
+	unsigned long flags;
+	unsigned long tmp;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "%1", "%2")
+		M32R_LOCK" %0, @%2;		\n\t"
+		"mv	%1, %0;			\n\t"
+		"and	%0, %3;			\n\t"
+		"xor	%1, %3;			\n\t"
+		M32R_UNLOCK" %1, @%2;		\n\t"
+		: "=&r" (oldbit), "=&r" (tmp)
+		: "r" (a), "r" (mask)
+		: "memory"
+	);
+	local_irq_restore(flags);
+
+	return (oldbit != 0);
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __inline__ int test_bit(int nr, const volatile void * addr)
+{
+	__u32 mask;
+	const volatile __u32 *a = addr;
+
+	a += (nr >> 5);
+	mask = (1 << (nr & 0x1F));
+
+	return ((*a & mask) != 0);
+}
+
+/**
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static __inline__ unsigned long ffz(unsigned long word)
+{
+	int k;
+
+	word = ~word;
+	k = 0;
+	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
+	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
+	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
+	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
+	if (!(word & 0x00000001)) { k += 1; }
+
+	return k;
+}
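+
+/*
+ * For example, ffz(0x0000ffff) == 16 (the low 16 bits are all ones, so
+ * bit 16 is the first zero), and ffz(0xfffffffe) == 0.
+ */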
+
+/**
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
+
+#define find_first_zero_bit(addr, size) \
+	find_next_zero_bit((addr), (size), 0)
+
+/**
+ * find_next_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static __inline__ int find_next_zero_bit(const unsigned long *addr,
+					 int size, int offset)
+{
+	const unsigned long *p = addr + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (32-offset);
+		if (size < 32)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size & ~31UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+found_middle:
+	return result + ffz(tmp);
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+	int k = 0;
+
+	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
+	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
+	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
+	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
+	if (!(word & 0x00000001)) { k += 1;}
+
+	return k;
+}
+
+/*
+ * fls: find last bit set.
+ */
+#define fls(x) generic_fls(x)
+
+#ifdef __KERNEL__
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
+
+/**
+ * find_next_bit - find the first set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline unsigned long find_next_bit(const unsigned long *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+	unsigned int result = offset & ~31UL;
+	unsigned int tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if (offset) {
+		tmp = *p++;
+		tmp &= ~0UL << offset;
+		if (size < 32)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while (size >= 32) {
+		if ((tmp = *p++) != 0)
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= ~0UL >> (32 - size);
+	if (tmp == 0UL)        /* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+#define ffs(x)	generic_ffs(x)
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+#define hweight32(x)	generic_hweight32(x)
+#define hweight16(x)	generic_hweight16(x)
+#define hweight8(x)	generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+
+/*
+ * ext2_XXXX function
+ * orig: include/asm-sh/bitops.h
+ */
+
+#ifdef __LITTLE_ENDIAN__
+#define ext2_set_bit			test_and_set_bit
+#define ext2_clear_bit			__test_and_clear_bit
+#define ext2_test_bit			test_bit
+#define ext2_find_first_zero_bit	find_first_zero_bit
+#define ext2_find_next_zero_bit		find_next_zero_bit
+#else
+static inline int ext2_set_bit(int nr, volatile void * addr)
+{
+	__u8 mask, oldbit;
+	volatile __u8 *a = addr;
+
+	a += (nr >> 3);
+	mask = (1 << (nr & 0x07));
+	oldbit = (*a & mask);
+	*a |= mask;
+
+	return (oldbit != 0);
+}
+
+static inline int ext2_clear_bit(int nr, volatile void * addr)
+{
+	__u8 mask, oldbit;
+	volatile __u8 *a = addr;
+
+	a += (nr >> 3);
+	mask = (1 << (nr & 0x07));
+	oldbit = (*a & mask);
+	*a &= ~mask;
+
+	return (oldbit != 0);
+}
+
+static inline int ext2_test_bit(int nr, const volatile void * addr)
+{
+	__u32 mask;
+	const volatile __u8 *a = addr;
+
+	a += (nr >> 3);
+	mask = (1 << (nr & 0x07));
+
+	return ((mask & *a) != 0);
+}
+
+#define ext2_find_first_zero_bit(addr, size) \
+	ext2_find_next_zero_bit((addr), (size), 0)
+
+static inline unsigned long ext2_find_next_zero_bit(void *addr,
+	unsigned long size, unsigned long offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long result = offset & ~31UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 31UL;
+	if(offset) {
+		/* We hold the little endian value in tmp, but then the
+		 * shift is illegal. So we could keep a big endian value
+		 * in tmp, like this:
+		 *
+		 * tmp = __swab32(*(p++));
+		 * tmp |= ~0UL >> (32-offset);
+		 *
+		 * but this would decrease performance, so we change the
+		 * shift:
+		 */
+		tmp = *(p++);
+		tmp |= __swab32(~0UL >> (32-offset));
+		if(size < 32)
+			goto found_first;
+		if(~tmp)
+			goto found_middle;
+		size -= 32;
+		result += 32;
+	}
+	while(size & ~31UL) {
+		if(~(tmp = *(p++)))
+			goto found_middle;
+		result += 32;
+		size -= 32;
+	}
+	if(!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	/* tmp is little endian, so we would have to swab the shift,
+	 * see above. But then we have to swab tmp below for ffz, so
+	 * we might as well do this here.
+	 */
+	return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+	return result + ffz(__swab32(tmp));
+}
+#endif
+
+#define ext2_set_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_set_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+#define ext2_clear_bit_atomic(lock, nr, addr)		\
+	({						\
+		int ret;				\
+		spin_lock(lock);			\
+		ret = ext2_clear_bit((nr), (addr));	\
+		spin_unlock(lock);			\
+		ret;					\
+	})
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_BITOPS_H */
diff --git a/include/asm-m32r/bug.h b/include/asm-m32r/bug.h
new file mode 100644
index 0000000..4cc0462
--- /dev/null
+++ b/include/asm-m32r/bug.h
@@ -0,0 +1,4 @@
+#ifndef _M32R_BUG_H
+#define _M32R_BUG_H
+#include <asm-generic/bug.h>
+#endif
diff --git a/include/asm-m32r/bugs.h b/include/asm-m32r/bugs.h
new file mode 100644
index 0000000..9a56f66
--- /dev/null
+++ b/include/asm-m32r/bugs.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_M32R_BUGS_H
+#define _ASM_M32R_BUGS_H
+
+/* $Id$ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *     void check_bugs(void);
+ */
+#include <asm/processor.h>
+
+static void __init check_bugs(void)
+{
+	extern unsigned long loops_per_jiffy;
+
+	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
+}
+
+#endif  /* _ASM_M32R_BUGS_H */
diff --git a/include/asm-m32r/byteorder.h b/include/asm-m32r/byteorder.h
new file mode 100644
index 0000000..3c0b9a2
--- /dev/null
+++ b/include/asm-m32r/byteorder.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_M32R_BYTEORDER_H
+#define _ASM_M32R_BYTEORDER_H
+
+/* $Id$ */
+
+#include <asm/types.h>
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#  define __BYTEORDER_HAS_U64__
+#  define __SWAB_64_THRU_32__
+#endif
+
+#if defined(__LITTLE_ENDIAN__)
+#  include <linux/byteorder/little_endian.h>
+#else
+#  include <linux/byteorder/big_endian.h>
+#endif
+
+#endif /* _ASM_M32R_BYTEORDER_H */
diff --git a/include/asm-m32r/cache.h b/include/asm-m32r/cache.h
new file mode 100644
index 0000000..7248205
--- /dev/null
+++ b/include/asm-m32r/cache.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_M32R_CACHE_H
+#define _ASM_M32R_CACHE_H
+
+/* $Id$ */
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT		4
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define L1_CACHE_SHIFT_MAX	4
+
+#endif  /* _ASM_M32R_CACHE_H */
diff --git a/include/asm-m32r/cachectl.h b/include/asm-m32r/cachectl.h
new file mode 100644
index 0000000..2aab8f6
--- /dev/null
+++ b/include/asm-m32r/cachectl.h
@@ -0,0 +1,26 @@
+/*
+ * cachectl.h -- defines for M32R cache control system calls
+ *
+ * Copyright (C) 2003 by Kazuhiro Inaoka
+ */
+#ifndef	__ASM_M32R_CACHECTL
+#define	__ASM_M32R_CACHECTL
+
+/*
+ * Options for cacheflush system call
+ *
+ * cacheflush() is currently implemented as flush_cache_all().
+ */
+#define	ICACHE	(1<<0)		/* flush instruction cache        */
+#define	DCACHE	(1<<1)		/* writeback and flush data cache */
+#define	BCACHE	(ICACHE|DCACHE)	/* flush both caches              */
+
+/*
+ * Caching modes for the cachectl(2) call
+ *
+ * cachectl(2) is currently not supported and returns ENOSYS.
+ */
+#define CACHEABLE	0	/* make pages cacheable */
+#define UNCACHEABLE	1	/* make pages uncacheable */
+
+#endif	/* __ASM_M32R_CACHECTL */
diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h
new file mode 100644
index 0000000..46fc4c3
--- /dev/null
+++ b/include/asm-m32r/cacheflush.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_M32R_CACHEFLUSH_H
+#define _ASM_M32R_CACHEFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+extern void _flush_cache_all(void);
+extern void _flush_cache_copyback_all(void);
+
+#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#ifndef CONFIG_SMP
+#define flush_icache_range(start, end)		_flush_cache_copyback_all()
+#define flush_icache_page(vma,pg)		_flush_cache_copyback_all()
+#define flush_icache_user_range(vma,pg,adr,len)	_flush_cache_copyback_all()
+#define flush_cache_sigtramp(addr)		_flush_cache_copyback_all()
+#else	/* CONFIG_SMP */
+extern void smp_flush_cache_all(void);
+#define flush_icache_range(start, end)		smp_flush_cache_all()
+#define flush_icache_page(vma,pg)		smp_flush_cache_all()
+#define flush_icache_user_range(vma,pg,adr,len)	smp_flush_cache_all()
+#define flush_cache_sigtramp(addr)		_flush_cache_copyback_all()
+#endif	/* CONFIG_SMP */
+#elif defined(CONFIG_CHIP_M32102)
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_range(start, end)		_flush_cache_all()
+#define flush_icache_page(vma,pg)		_flush_cache_all()
+#define flush_icache_user_range(vma,pg,adr,len)	_flush_cache_all()
+#define flush_cache_sigtramp(addr)		_flush_cache_all()
+#else
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_range(start, end)		do { } while (0)
+#define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#define flush_cache_sigtramp(addr)		do { } while (0)
+#endif	/* CONFIG_CHIP_* */
+
+#define flush_cache_vmap(start, end)	do { } while (0)
+#define flush_cache_vunmap(start, end)	do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
+do {								\
+	memcpy(dst, src, len);					\
+	flush_icache_user_range(vma, page, vaddr, len);		\
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
+	memcpy(dst, src, len)
+
+#endif /* _ASM_M32R_CACHEFLUSH_H */
+
diff --git a/include/asm-m32r/checksum.h b/include/asm-m32r/checksum.h
new file mode 100644
index 0000000..99f37db
--- /dev/null
+++ b/include/asm-m32r/checksum.h
@@ -0,0 +1,208 @@
+#ifdef __KERNEL__
+#ifndef _ASM_M32R_CHECKSUM_H
+#define _ASM_M32R_CHECKSUM_H
+
+/*
+ * include/asm-m32r/checksum.h
+ *
+ * IP/TCP/UDP checksum routines
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Some code taken from mips and parisc architecture.
+ *
+ *    Copyright (C) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata
+ *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage unsigned int csum_partial(const unsigned char *buff,
+				     int len, unsigned int sum);
+
+/*
+ * The same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary.
+ */
+extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
+					      unsigned char *dst,
+                                              int len, unsigned int sum);
+
+/*
+ * This is a new version of the above that records errors it finds in
+ * *err_ptr, but continues and zeros the rest of the buffer.
+ */
+extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
+                                                unsigned char *dst,
+                                                int len, unsigned int sum,
+                                                int *err_ptr);
+
+/*
+ *	Fold a partial checksum
+ */
+
+static inline unsigned int csum_fold(unsigned int sum)
+{
+	unsigned long tmpreg;
+	__asm__(
+		"	sll3	%1, %0, #16 \n"
+		"	cmp	%0, %0 \n"
+		"	addx	%0, %1 \n"
+		"	ldi	%1, #0 \n"
+		"	srli	%0, #16 \n"
+		"	addx	%0, %1 \n"
+		"	xor3	%0, %0, #0x0000ffff \n"
+		: "=r" (sum), "=&r" (tmpreg)
+		: "0"  (sum)
+		: "cbit"
+	);
+	return sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline unsigned short ip_fast_csum(unsigned char * iph,
+					  unsigned int ihl) {
+	unsigned long sum, tmpreg0, tmpreg1;
+
+	__asm__ __volatile__(
+		"	ld	%0, @%1+ \n"
+		"	addi	%2, #-4 \n"
+		"#	bgez	%2, 2f \n"
+		"	cmp	%0, %0 \n"
+		"	ld	%3, @%1+ \n"
+		"	ld	%4, @%1+ \n"
+		"	addx	%0, %3 \n"
+		"	ld	%3, @%1+ \n"
+		"	addx	%0, %4 \n"
+		"	addx	%0, %3 \n"
+		"	.fillinsn\n"
+		"1: \n"
+		"	ld	%4, @%1+ \n"
+		"	addi	%2, #-1 \n"
+		"	addx	%0, %4 \n"
+		"	bgtz	%2, 1b \n"
+		"\n"
+		"	ldi	%3, #0 \n"
+		"	addx	%0, %3 \n"
+		"	.fillinsn\n"
+		"2: \n"
+	/* Since the input registers which are loaded with iph and ihl
+	   are modified, we must also specify them as outputs, or gcc
+	   will assume they contain their original values. */
+	: "=&r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmpreg0), "=&r" (tmpreg1)
+	: "1" (iph), "2" (ihl)
+	: "cbit", "memory");
+
+	return csum_fold(sum);
+}
+
+static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
+					       unsigned long daddr,
+					       unsigned short len,
+					       unsigned short proto,
+					       unsigned int sum)
+{
+#if defined(__LITTLE_ENDIAN)
+	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+#else
+	unsigned long len_proto = (proto<<16)+len;
+#endif
+	unsigned long tmpreg;
+
+	__asm__(
+		"	cmp	%0, %0 \n"
+		"	addx	%0, %2 \n"
+		"	addx	%0, %3 \n"
+		"	addx	%0, %4 \n"
+		"	ldi	%1, #0 \n"
+		"	addx	%0, %1 \n"
+		: "=r" (sum), "=&r" (tmpreg)
+		: "r" (daddr), "r" (saddr), "r" (len_proto), "0" (sum)
+		: "cbit"
+	);
+
+	return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+						   unsigned long daddr,
+						   unsigned short len,
+						   unsigned short proto,
+						   unsigned int sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
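+
+/*
+ * Typically the result of csum_partial() over the TCP/UDP payload is fed
+ * straight into csum_tcpudp_magic(); an illustrative sketch (all variable
+ * names here are placeholders):
+ *
+ *	sum = csum_partial(data, len, 0);
+ *	th->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, sum);
+ */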
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+	return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+						 struct in6_addr *daddr,
+						 __u16 len,
+						 unsigned short proto,
+						 unsigned int sum)
+{
+	unsigned long tmpreg0, tmpreg1, tmpreg2, tmpreg3;
+	__asm__(
+		"	ld	%1, @(%5) \n"
+		"	ld	%2, @(4,%5) \n"
+		"	ld	%3, @(8,%5) \n"
+		"	ld	%4, @(12,%5) \n"
+		"	add	%0, %1 \n"
+		"	addx	%0, %2 \n"
+		"	addx	%0, %3 \n"
+		"	addx	%0, %4 \n"
+		"	ld	%1, @(%6) \n"
+		"	ld	%2, @(4,%6) \n"
+		"	ld	%3, @(8,%6) \n"
+		"	ld	%4, @(12,%6) \n"
+		"	addx	%0, %1 \n"
+		"	addx	%0, %2 \n"
+		"	addx	%0, %3 \n"
+		"	addx	%0, %4 \n"
+		"	addx	%0, %7 \n"
+		"	addx	%0, %8 \n"
+		"	ldi	%1, #0 \n"
+		"	addx	%0, %1 \n"
+		: "=&r" (sum), "=&r" (tmpreg0), "=&r" (tmpreg1),
+		  "=&r" (tmpreg2), "=&r" (tmpreg3)
+		: "r" (saddr), "r" (daddr),
+		  "r" (htonl((__u32) (len))), "r" (htonl(proto)), "0" (sum)
+		: "cbit"
+	);
+
+	return csum_fold(sum);
+}
+
+#endif /* _ASM_M32R_CHECKSUM_H */
+#endif /* __KERNEL__ */
diff --git a/include/asm-m32r/cputime.h b/include/asm-m32r/cputime.h
new file mode 100644
index 0000000..0a47550
--- /dev/null
+++ b/include/asm-m32r/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __M32R_CPUTIME_H
+#define __M32R_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __M32R_CPUTIME_H */
diff --git a/include/asm-m32r/current.h b/include/asm-m32r/current.h
new file mode 100644
index 0000000..c19d927
--- /dev/null
+++ b/include/asm-m32r/current.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_M32R_CURRENT_H
+#define _ASM_M32R_CURRENT_H
+
+/* $Id$ */
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static __inline__ struct task_struct *get_current(void)
+{
+	return current_thread_info()->task;
+}
+
+#define current	(get_current())
+
+#endif	/* _ASM_M32R_CURRENT_H */
+
diff --git a/include/asm-m32r/delay.h b/include/asm-m32r/delay.h
new file mode 100644
index 0000000..f285eae
--- /dev/null
+++ b/include/asm-m32r/delay.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_M32R_DELAY_H
+#define _ASM_M32R_DELAY_H
+
+/* $Id$ */
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/m32r/lib/delay.c
+ */
+
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
+	__udelay(n))
+
+#define ndelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+	__ndelay(n))
+
+#endif /* _ASM_M32R_DELAY_H */
diff --git a/include/asm-m32r/div64.h b/include/asm-m32r/div64.h
new file mode 100644
index 0000000..417a51b
--- /dev/null
+++ b/include/asm-m32r/div64.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_M32R_DIV64
+#define _ASM_M32R_DIV64
+
+/* $Id$ */
+
+/* unsigned long long division.
+ * Input:
+ *  unsigned long long  n
+ *  unsigned long  base
+ * Output:
+ *  n = n / base;
+ *  return value = n % base;
+ */
+#define do_div(n, base)						\
+({								\
+	unsigned long _res, _high, _mid, _low;			\
+								\
+	_low = (n) & 0xffffffffUL;				\
+	_high = (n) >> 32;					\
+	if (_high) {						\
+		_mid = (_high % (unsigned long)(base)) << 16;	\
+		_high = _high / (unsigned long)(base);		\
+		_mid += _low >> 16;				\
+		_low &= 0x0000ffffUL;				\
+		_low += (_mid % (unsigned long)(base)) << 16;	\
+		_mid = _mid / (unsigned long)(base);		\
+		_res = _low % (unsigned long)(base);		\
+		_low = _low / (unsigned long)(base);		\
+		n = _low + ((long long)_mid << 16) +		\
+			((long long)_high << 32);		\
+	} else {						\
+		_res = _low % (unsigned long)(base);		\
+		n = (_low / (unsigned long)(base));		\
+	}							\
+	_res;							\
+})
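+
+/*
+ * Example: with unsigned long long n = 1000000123 and base = 1000000,
+ * do_div(n, 1000000) leaves n == 1000 and evaluates to the remainder, 123.
+ */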
+
+#endif  /* _ASM_M32R_DIV64 */
diff --git a/include/asm-m32r/dma-mapping.h b/include/asm-m32r/dma-mapping.h
new file mode 100644
index 0000000..3a2db28
--- /dev/null
+++ b/include/asm-m32r/dma-mapping.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_M32R_DMA_MAPPING_H
+#define _ASM_M32R_DMA_MAPPING_H
+
+/*
+ * NOTE: Do not include <asm-generic/dma-mapping.h>, because it requires
+ * PCI support, which the current M32R port does not provide.
+ */
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   int flag)
+{
+	return (void *)NULL;
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		    dma_addr_t dma_handle)
+{
+	return;
+}
+
+#endif /* _ASM_M32R_DMA_MAPPING_H */
diff --git a/include/asm-m32r/dma.h b/include/asm-m32r/dma.h
new file mode 100644
index 0000000..7263b01
--- /dev/null
+++ b/include/asm-m32r/dma.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_M32R_DMA_H
+#define _ASM_M32R_DMA_H
+
+/* $Id$ */
+
+#include <asm/io.h>
+
+/*
+ * The maximum address that we can perform a DMA transfer
+ * to on this platform
+ */
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x20000000)
+
+#endif /* _ASM_M32R_DMA_H */
diff --git a/include/asm-m32r/elf.h b/include/asm-m32r/elf.h
new file mode 100644
index 0000000..bbee8b2
--- /dev/null
+++ b/include/asm-m32r/elf.h
@@ -0,0 +1,136 @@
+#ifndef _ASM_M32R__ELF_H
+#define _ASM_M32R__ELF_H
+
+/*
+ * ELF-specific definitions.
+ *
+ * Copyright (C) 1999-2004, Renesas Technology Corp.
+ *      Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/page.h>
+
+/* M32R relocation types  */
+#define	R_M32R_NONE		0
+#define	R_M32R_16		1
+#define	R_M32R_32		2
+#define	R_M32R_24		3
+#define	R_M32R_10_PCREL		4
+#define	R_M32R_18_PCREL		5
+#define	R_M32R_26_PCREL		6
+#define	R_M32R_HI16_ULO		7
+#define	R_M32R_HI16_SLO		8
+#define	R_M32R_LO16		9
+#define	R_M32R_SDA16		10
+#define	R_M32R_GNU_VTINHERIT	11
+#define	R_M32R_GNU_VTENTRY	12
+
+#define R_M32R_16_RELA		33
+#define R_M32R_32_RELA		34
+#define R_M32R_24_RELA		35
+#define R_M32R_10_PCREL_RELA	36
+#define R_M32R_18_PCREL_RELA	37
+#define R_M32R_26_PCREL_RELA	38
+#define R_M32R_HI16_ULO_RELA	39
+#define R_M32R_HI16_SLO_RELA	40
+#define R_M32R_LO16_RELA	41
+#define R_M32R_SDA16_RELA	42
+#define	R_M32R_RELA_GNU_VTINHERIT	43
+#define	R_M32R_RELA_GNU_VTENTRY	44
+
+#define R_M32R_GOT24		48
+#define R_M32R_26_PLTREL	49
+#define R_M32R_COPY		50
+#define R_M32R_GLOB_DAT		51
+#define R_M32R_JMP_SLOT		52
+#define R_M32R_RELATIVE		53
+#define R_M32R_GOTOFF		54
+#define R_M32R_GOTPC24		55
+#define R_M32R_GOT16_HI_ULO	56
+#define R_M32R_GOT16_HI_SLO	57
+#define R_M32R_GOT16_LO		58
+#define R_M32R_GOTPC_HI_ULO	59
+#define R_M32R_GOTPC_HI_SLO	60
+#define R_M32R_GOTPC_LO		61
+#define R_M32R_GOTOFF_HI_ULO	62
+#define R_M32R_GOTOFF_HI_SLO	63
+#define R_M32R_GOTOFF_LO	64
+
+#define R_M32R_NUM		256
+
+/*
+ * ELF register definitions..
+ */
+#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* We have no FPU.  */
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_M32R) || ((x)->e_machine == EM_CYGNUS_M32R))
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#if defined(__LITTLE_ENDIAN)
+#define ELF_DATA	ELFDATA2LSB
+#elif defined(__BIG_ENDIAN)
+#define ELF_DATA	ELFDATA2MSB
+#else
+#error no endian defined
+#endif
+#define ELF_ARCH	EM_M32R
+
+/* r0 is set by ld.so to a pointer to a function which might be
+ * registered using 'atexit'.  This provides a means for the dynamic
+ * linker to call DT_FINI functions for shared libraries that have
+ * been loaded before the code runs.
+ *
+ * So that we can use the same startup file with static executables,
+ * we start programs with a value of 0 to indicate that there is no
+ * such function.
+ */
+#define ELF_PLAT_INIT(_r, load_addr)	(_r)->r0 = 0
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader.  We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+   now struct_user_regs, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs)  \
+	memcpy((char *)pr_reg, (char *)regs, sizeof (struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports.  */
+#define ELF_HWCAP	(0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.  */
+#define ELF_PLATFORM	(NULL)
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
+#endif
+
+#endif  /* _ASM_M32R__ELF_H */
diff --git a/include/asm-m32r/errno.h b/include/asm-m32r/errno.h
new file mode 100644
index 0000000..7a98520
--- /dev/null
+++ b/include/asm-m32r/errno.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_M32R_ERRNO_H
+#define _ASM_M32R_ERRNO_H
+
+/* $Id$ */
+
+#include <asm-generic/errno.h>
+
+#endif /* _ASM_M32R_ERRNO_H */
+
diff --git a/include/asm-m32r/fcntl.h b/include/asm-m32r/fcntl.h
new file mode 100644
index 0000000..3e30895
--- /dev/null
+++ b/include/asm-m32r/fcntl.h
@@ -0,0 +1,92 @@
+#ifndef _ASM_M32R_FCNTL_H
+#define _ASM_M32R_FCNTL_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+   located on an ext2 file system */
+#define O_ACCMODE	   0003
+#define O_RDONLY	     00
+#define O_WRONLY	     01
+#define O_RDWR		     02
+#define O_CREAT		   0100	/* not fcntl */
+#define O_EXCL		   0200	/* not fcntl */
+#define O_NOCTTY	   0400	/* not fcntl */
+#define O_TRUNC		  01000	/* not fcntl */
+#define O_APPEND	  02000
+#define O_NONBLOCK	  04000
+#define O_NDELAY	O_NONBLOCK
+#define O_SYNC		 010000
+#define FASYNC		 020000	/* fcntl, for BSD compatibility */
+#define O_DIRECT	 040000	/* direct disk access hint */
+#define O_LARGEFILE	0100000
+#define O_DIRECTORY	0200000	/* must be a directory */
+#define O_NOFOLLOW	0400000 /* don't follow links */
+#define O_NOATIME	01000000
+
+#define F_DUPFD		0	/* dup */
+#define F_GETFD		1	/* get close_on_exec */
+#define F_SETFD		2	/* set/clear close_on_exec */
+#define F_GETFL		3	/* get file->f_flags */
+#define F_SETFL		4	/* set file->f_flags */
+#define F_GETLK		5
+#define F_SETLK		6
+#define F_SETLKW	7
+
+#define F_SETOWN	8	/*  for sockets. */
+#define F_GETOWN	9	/*  for sockets. */
+#define F_SETSIG	10	/*  for sockets. */
+#define F_GETSIG	11	/*  for sockets. */
+
+#define F_GETLK64	12	/*  using 'struct flock64' */
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC	1	/* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK		0
+#define F_WRLCK		1
+#define F_UNLCK		2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK		4	/* or 3 */
+#define F_SHLCK		8	/* or 4 */
+
+/* for leases */
+#define F_INPROGRESS	16
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH		1	/* shared lock */
+#define LOCK_EX		2	/* exclusive lock */
+#define LOCK_NB		4	/* or'd with one of the above to prevent
+				   blocking */
+#define LOCK_UN		8	/* remove lock */
+
+#define LOCK_MAND	32	/* This is a mandatory flock */
+#define LOCK_READ	64	/* ... Which allows concurrent read operations */
+#define LOCK_WRITE	128	/* ... Which allows concurrent write operations */
+#define LOCK_RW		192	/* ... Which allows concurrent read & write ops */
+
+struct flock {
+	short l_type;
+	short l_whence;
+	off_t l_start;
+	off_t l_len;
+	pid_t l_pid;
+};
+
+struct flock64 {
+	short  l_type;
+	short  l_whence;
+	loff_t l_start;
+	loff_t l_len;
+	pid_t  l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE	1024
+
+#endif  /* _ASM_M32R_FCNTL_H */
diff --git a/include/asm-m32r/flat.h b/include/asm-m32r/flat.h
new file mode 100644
index 0000000..1b285f6
--- /dev/null
+++ b/include/asm-m32r/flat.h
@@ -0,0 +1,145 @@
+/*
+ * include/asm-m32r/flat.h
+ *
+ * uClinux flat-format executables
+ *
+ * Copyright (C) 2004  Kazuhiro Inaoka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive for
+ * more details.
+ */
+#ifndef __ASM_M32R_FLAT_H
+#define __ASM_M32R_FLAT_H
+
+#define	flat_stack_align(sp)		(*sp += (*sp & 3 ? (4 - (*sp & 3)): 0))
+#define	flat_argvp_envp_on_stack()		0
+#define	flat_old_ram_flag(flags)		(flags)
+#define	flat_reloc_valid(reloc, size)		\
+	(((reloc) - textlen_for_m32r_lo16_data) <= (size))
+#define flat_get_addr_from_rp(rp, relval, flags) \
+	m32r_flat_get_addr_from_rp(rp, relval, (text_len) )
+
+#define flat_put_addr_at_rp(rp, addr, relval) \
+	m32r_flat_put_addr_at_rp(rp, addr, relval)
+
+/* Convert a relocation entry into an address.  */
+static inline unsigned long
+flat_get_relocate_addr (unsigned long relval)
+{
+        return relval & 0x00ffffff; /* Mask out top 8-bits */
+}
+
+#define	flat_m32r_get_reloc_type(relval)	((relval) >> 24)
+
+#define M32R_SETH_OPCODE	0xd0c00000 /* SETH instruction code */
+
+#define FLAT_M32R_32		0x00	/* 32bits reloc */
+#define FLAT_M32R_24		0x01	/* unsigned 24bits reloc */
+#define FLAT_M32R_16		0x02	/* 16bits reloc */
+#define FLAT_M32R_LO16		0x03	/* signed low 16bits reloc (low()) */
+#define FLAT_M32R_LO16_DATA	0x04	/* signed low 16bits reloc (low())
+					   for a symbol in .data section */
+					/* High 16bits of an address used
+					   when the lower 16bits are treated
+					   as unsigned.
+                                           To create SETH instruction only.
+					   0x1X: X is the register number.
+					   0x10 - 0x3F are reserved. */
+#define FLAT_M32R_HI16_ULO	0x10	/* reloc for SETH Rn,#high(imm16) */
+					/* High 16bits of an address used
+					   when the lower 16bits are treated
+					   as signed.
+                                           To create SETH instruction only.
+					   0x2X: X means a number of register.
+					   0x20 - 0x4F are reserved. */
+#define FLAT_M32R_HI16_SLO	0x20	/* reloc for SETH Rn,#shigh(imm16) */
+
+static unsigned long textlen_for_m32r_lo16_data = 0;
+
+static inline unsigned long m32r_flat_get_addr_from_rp (unsigned long *rp,
+                                                        unsigned long relval,
+						        unsigned long textlen)
+{
+        unsigned int reloc = flat_m32r_get_reloc_type (relval);
+	textlen_for_m32r_lo16_data = 0;
+	if (reloc & 0xf0) {
+		unsigned long addr = htonl(*rp);
+		switch (reloc & 0xf0)
+		{
+		case FLAT_M32R_HI16_ULO:
+		case FLAT_M32R_HI16_SLO:
+			if (addr == 0) {
+				/* put "seth Rn,#0x0" instead of 0 (addr). */
+				*rp = (M32R_SETH_OPCODE | ((reloc & 0x0f)<<24));
+			}
+			return addr;
+		default:
+			break;
+		}
+	} else {
+		switch (reloc)
+		{
+		case FLAT_M32R_LO16:
+			return htonl(*rp) & 0xFFFF;
+		case FLAT_M32R_LO16_DATA:
+			/* FIXME: the return value will be decreased by
+			   textlen again in m32r_flat_put_addr_at_rp() */
+			textlen_for_m32r_lo16_data = textlen;
+			return (htonl(*rp) & 0xFFFF) + textlen;
+		case FLAT_M32R_16:
+			return htons(*(unsigned short *)rp) & 0xFFFF;
+		case FLAT_M32R_24:
+			return htonl(*rp) & 0xFFFFFF;
+		case FLAT_M32R_32:
+			return htonl(*rp);
+		default:
+			break;
+		}
+	}
+	return ~0;      /* bogus value */
+}
+
+static inline void m32r_flat_put_addr_at_rp (unsigned long *rp,
+					     unsigned long addr,
+                                             unsigned long relval)
+{
+        unsigned int reloc = flat_m32r_get_reloc_type (relval);
+	if (reloc & 0xf0) {
+		unsigned long Rn = reloc & 0x0f; /* get a number of register */
+		Rn <<= 24; /* 0x0R000000 */
+		reloc &= 0xf0;
+		switch (reloc)
+		{
+		case FLAT_M32R_HI16_ULO: /* To create SETH Rn,#high(imm16) */
+			*rp = (M32R_SETH_OPCODE | Rn
+			       | ((addr >> 16) & 0xFFFF));
+			break;
+		case FLAT_M32R_HI16_SLO: /* To create SETH Rn,#shigh(imm16) */
+			*rp = (M32R_SETH_OPCODE | Rn
+			       | (((addr >> 16) + ((addr & 0x8000) ? 1 : 0))
+				  & 0xFFFF));
+			break;
+		}
+	} else {
+		switch (reloc) {
+		case FLAT_M32R_LO16_DATA:
+			addr -= textlen_for_m32r_lo16_data;
+			textlen_for_m32r_lo16_data = 0;
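+			/* fall through to the LO16 fixup below */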
+		case FLAT_M32R_LO16:
+			*rp = (htonl(*rp) & 0xFFFF0000) | (addr & 0xFFFF);
+			break;
+		case FLAT_M32R_16:
+			*(unsigned short *)rp = addr & 0xFFFF;
+			break;
+		case FLAT_M32R_24:
+			*rp = (htonl(*rp) & 0xFF000000) | (addr & 0xFFFFFF);
+			break;
+		case FLAT_M32R_32:
+			*rp = addr;
+			break;
+		}
+	}
+}
+
+#endif /* __ASM_M32R_FLAT_H */
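The reloc-type/offset split implemented by flat_get_relocate_addr() and flat_m32r_get_reloc_type() is easiest to see on a concrete value. A standalone hedged sketch that repeats the shift/mask arithmetic used above (the sample relval is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long relval = 0x23001234;	/* hypothetical relocation word */
	unsigned int type = relval >> 24;	/* flat_m32r_get_reloc_type() */
	unsigned long off = relval & 0x00ffffff; /* flat_get_relocate_addr() */

	if (type & 0xf0)	/* HI16_ULO/HI16_SLO: low nibble is the register */
		printf("HI16 reloc for R%u, offset 0x%06lx\n", type & 0x0f, off);
	else
		printf("reloc type %u, offset 0x%06lx\n", type, off);
	return 0;
}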
diff --git a/include/asm-m32r/hardirq.h b/include/asm-m32r/hardirq.h
new file mode 100644
index 0000000..5da830e
--- /dev/null
+++ b/include/asm-m32r/hardirq.h
@@ -0,0 +1,37 @@
+#ifdef __KERNEL__
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+#if NR_IRQS > 256
+#define HARDIRQ_BITS	9
+#else
+#define HARDIRQ_BITS	8
+#endif
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+static inline void ack_bad_irq(int irq)
+{
+	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+	BUG();
+}
+
+#endif /* __ASM_HARDIRQ_H */
+#endif /* __KERNEL__ */
diff --git a/include/asm-m32r/hdreg.h b/include/asm-m32r/hdreg.h
new file mode 100644
index 0000000..7f7fd1a
--- /dev/null
+++ b/include/asm-m32r/hdreg.h
@@ -0,0 +1 @@
+#include <asm-generic/hdreg.h>
diff --git a/include/asm-m32r/hw_irq.h b/include/asm-m32r/hw_irq.h
new file mode 100644
index 0000000..8d7e9d0
--- /dev/null
+++ b/include/asm-m32r/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_M32R_HW_IRQ_H
+#define _ASM_M32R_HW_IRQ_H
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
+{
+	/* Nothing to do */
+}
+
+#endif /* _ASM_M32R_HW_IRQ_H */
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h
new file mode 100644
index 0000000..be64f24
--- /dev/null
+++ b/include/asm-m32r/ide.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_M32R_IDE_H
+#define _ASM_M32R_IDE_H
+
+/* $Id$ */
+
+/*
+ *  linux/include/asm-m32r/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+/*
+ *  This file contains the M32R architecture specific IDE code.
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+# ifdef CONFIG_BLK_DEV_IDEPCI
+#define MAX_HWIFS	10
+# else
+#define MAX_HWIFS	2
+# endif
+#endif
+
+#if defined(CONFIG_PLAT_M32700UT)
+#include <asm/irq.h>
+#include <asm/m32700ut/m32700ut_pld.h>
+#endif
+
+#define IDE_ARCH_OBSOLETE_DEFAULTS
+
+static __inline__ int ide_default_irq(unsigned long base)
+{
+	switch (base) {
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2)
+		case 0x1f0: return PLD_IRQ_CFIREQ;
+		default:
+			return 0;
+#else
+		case 0x1f0: return 14;
+		case 0x170: return 15;
+		case 0x1e8: return 11;
+		case 0x168: return 10;
+		case 0x1e0: return 8;
+		case 0x160: return 12;
+		default:
+			return 0;
+#endif
+	}
+}
+
+static __inline__ unsigned long ide_default_io_base(int index)
+{
+	switch (index) {
+		case 0:	return 0x1f0;
+		case 1:	return 0x170;
+		case 2: return 0x1e8;
+		case 3: return 0x168;
+		case 4: return 0x1e0;
+		case 5: return 0x160;
+		default:
+			return 0;
+	}
+}
+
+#define IDE_ARCH_OBSOLETE_INIT
+#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
+
+#ifdef CONFIG_BLK_DEV_IDEPCI
+#define ide_init_default_irq(base)     (0)
+#else
+#define ide_init_default_irq(base)     ide_default_irq(base)
+#endif
+
+#include <asm-generic/ide_iops.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_IDE_H */
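ide_default_io_base() and ide_default_irq() together encode the classic legacy port/IRQ pairs (0x1f0/14, 0x170/15, ...) or, on the M32700UT/Mappi2 boards, route everything to the CF interrupt. A hedged kernel-context sketch (this header plus <linux/kernel.h> assumed available; the function itself is invented) that simply walks the table:

/* Sketch: print the built-in legacy IDE defaults. */
static void dump_ide_defaults(void)
{
	int index;

	for (index = 0; index < MAX_HWIFS; index++) {
		unsigned long base = ide_default_io_base(index);

		if (!base)
			continue;	/* no default registered for this slot */
		printk(KERN_INFO "ide%d: I/O base 0x%03lx, irq %d\n",
		       index, base, ide_default_irq(base));
	}
}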
diff --git a/include/asm-m32r/io.h b/include/asm-m32r/io.h
new file mode 100644
index 0000000..8e9e481
--- /dev/null
+++ b/include/asm-m32r/io.h
@@ -0,0 +1,232 @@
+#ifndef _ASM_M32R_IO_H
+#define _ASM_M32R_IO_H
+
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <asm/page.h>  /* __va */
+
+#ifdef __KERNEL__
+
+#define IO_SPACE_LIMIT  0xFFFFFFFF
+
+/**
+ *	virt_to_phys	-	map virtual addresses to physical
+ *	@address: address to remap
+ *
+ *	The returned physical address is the physical (CPU) mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses directly mapped or allocated via kmalloc.
+ *
+ *	This function does not give bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function
+ *	this function.
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+	return __pa(address);
+}
+
+/**
+ *	phys_to_virt	-	map physical address to virtual
+ *	@address: address to remap
+ *
+ *	The returned virtual address is a current CPU mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses that have a kernel mapping.
+ *
+ *	This function does not handle bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function.
+ */
+
+static inline void *phys_to_virt(unsigned long address)
+{
+	return __va(address);
+}
+
+extern void __iomem *
+__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ *	ioremap		-	map bus memory into CPU space
+ *	@offset:	bus address of the memory
+ *	@size:		size of the resource to map
+ *
+ *	ioremap performs a platform specific sequence of operations to
+ *	make bus memory CPU accessible via the readb/readw/readl/writeb/
+ *	writew/writel functions and the other mmio helpers. The returned
+ *	address is not guaranteed to be usable directly as a virtual
+ *	address.
+ */
+
+static inline void * ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap(offset, size, 0);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+#define ioremap_nocache(off,size) ioremap(off,size)
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_bus	page_to_phys
+#define virt_to_bus	virt_to_phys
+
+extern unsigned char _inb(unsigned long);
+extern unsigned short _inw(unsigned long);
+extern unsigned long _inl(unsigned long);
+extern unsigned char _inb_p(unsigned long);
+extern unsigned short _inw_p(unsigned long);
+extern unsigned long _inl_p(unsigned long);
+extern void _outb(unsigned char, unsigned long);
+extern void _outw(unsigned short, unsigned long);
+extern void _outl(unsigned long, unsigned long);
+extern void _outb_p(unsigned char, unsigned long);
+extern void _outw_p(unsigned short, unsigned long);
+extern void _outl_p(unsigned long, unsigned long);
+extern void _insb(unsigned int, void *, unsigned long);
+extern void _insw(unsigned int, void *, unsigned long);
+extern void _insl(unsigned int, void *, unsigned long);
+extern void _outsb(unsigned int, const void *, unsigned long);
+extern void _outsw(unsigned int, const void *, unsigned long);
+extern void _outsl(unsigned int, const void *, unsigned long);
+
+static inline unsigned char _readb(unsigned long addr)
+{
+	return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short _readw(unsigned long addr)
+{
+	return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned long _readl(unsigned long addr)
+{
+	return *(volatile unsigned long __force *)addr;
+}
+
+static inline void _writeb(unsigned char b, unsigned long addr)
+{
+	*(volatile unsigned char __force *)addr = b;
+}
+
+static inline void _writew(unsigned short w, unsigned long addr)
+{
+	*(volatile unsigned short __force *)addr = w;
+}
+
+static inline void _writel(unsigned long l, unsigned long addr)
+{
+	*(volatile unsigned long __force *)addr = l;
+}
+
+#define inb     _inb
+#define inw     _inw
+#define inl     _inl
+#define outb    _outb
+#define outw    _outw
+#define outl    _outl
+
+#define inb_p   _inb_p
+#define inw_p   _inw_p
+#define inl_p   _inl_p
+#define outb_p  _outb_p
+#define outw_p  _outw_p
+#define outl_p  _outl_p
+
+#define insb    _insb
+#define insw    _insw
+#define insl    _insl
+#define outsb   _outsb
+#define outsw   _outsw
+#define outsl   _outsl
+
+#define readb(addr)   _readb((unsigned long)(addr))
+#define readw(addr)   _readw((unsigned long)(addr))
+#define readl(addr)   _readl((unsigned long)(addr))
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
+#define writeb(val, addr)  _writeb((val), (unsigned long)(addr))
+#define writew(val, addr)  _writew((val), (unsigned long)(addr))
+#define writel(val, addr)  _writel((val), (unsigned long)(addr))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define mmiowb()
+
+#define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
+
+/**
+ *	check_signature		-	find BIOS signatures
+ *	@io_addr: mmio address to check
+ *	@signature:  signature block
+ *	@length: length of signature
+ *
+ *	Perform a signature comparison with the ISA mmio address io_addr.
+ *	Returns 1 on a match.
+ *
+ *	This function is deprecated. New drivers should use ioremap and
+ *	check_signature.
+ */
+
+static inline int check_signature(void __iomem *io_addr,
+	const unsigned char *signature, int length)
+{
+	int retval = 0;
+#if 0
+	printk("check_signature\n");
+	do {
+		if (readb(io_addr) != *signature)
+			goto out;
+		io_addr++;
+		signature++;
+		length--;
+	} while (length);
+	retval = 1;
+out:
+#endif
+	return retval;
+}
+
+static inline void
+memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	memset((void __force *) addr, val, count);
+}
+
+static inline void
+memcpy_fromio(void *dst, volatile void __iomem *src, int count)
+{
+	memcpy(dst, (void __force *) src, count);
+}
+
+static inline void
+memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+	memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+#endif  /* __KERNEL__ */
+
+#endif  /* _ASM_M32R_IO_H */
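The ioremap()/readl()/writel() helpers above are the usual way a driver reaches memory-mapped registers. A hedged sketch (kernel context; DEV_PHYS, DEV_SIZE, DEV_CTRL and dev_reset() are invented for illustration):

#include <linux/errno.h>
#include <asm/io.h>

#define DEV_PHYS	0x10000000UL	/* hypothetical bus address */
#define DEV_SIZE	0x100
#define DEV_CTRL	0x04		/* hypothetical control register offset */

static int dev_reset(void)
{
	void __iomem *regs = ioremap(DEV_PHYS, DEV_SIZE);

	if (!regs)
		return -ENOMEM;

	writel(1, regs + DEV_CTRL);	/* assert reset */
	(void)readl(regs + DEV_CTRL);	/* read back to flush the write */
	iounmap(regs);
	return 0;
}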
diff --git a/include/asm-m32r/ioctl.h b/include/asm-m32r/ioctl.h
new file mode 100644
index 0000000..87d8f7d
--- /dev/null
+++ b/include/asm-m32r/ioctl.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_M32R_IOCTL_H
+#define _ASM_M32R_IOCTL_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ */
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB-1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms.  The i386 ioctl numbering scheme doesn't really enforce
+ * a type field.  De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here.  Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS	8
+#define _IOC_TYPEBITS	8
+#define _IOC_SIZEBITS	14
+#define _IOC_DIRBITS	2
+
+#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT	0
+#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE	0U
+#define _IOC_WRITE	1U
+#define _IOC_READ	2U
+
+#define _IOC(dir,type,nr,size) \
+	(((dir)  << _IOC_DIRSHIFT) | \
+	 ((type) << _IOC_TYPESHIFT) | \
+	 ((nr)   << _IOC_NRSHIFT) | \
+	 ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
+
+#endif /* _ASM_M32R_IOCTL_H */
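The dir/type/nr/size packing described in the comment block is exactly what the _IO/_IOR/_IOW constructors and the _IOC_* extractors implement. A hedged userspace sketch with a made-up 'X' device ioctl (on a Linux/glibc system <sys/ioctl.h> pulls in the same macros):

#include <stdio.h>
#include <sys/ioctl.h>

struct xdev_config { int rate; int depth; };	/* hypothetical payload */

#define XDEV_GET_CONFIG	_IOR('X', 1, struct xdev_config)

int main(void)
{
	unsigned int cmd = XDEV_GET_CONFIG;

	/* prints dir=2 (read), type='X', nr=1, size=sizeof(struct xdev_config) */
	printf("dir=%u type='%c' nr=%u size=%u\n",
	       _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
	       (unsigned int)_IOC_SIZE(cmd));
	return 0;
}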
diff --git a/include/asm-m32r/ioctls.h b/include/asm-m32r/ioctls.h
new file mode 100644
index 0000000..b350829
--- /dev/null
+++ b/include/asm-m32r/ioctls.h
@@ -0,0 +1,88 @@
+#ifndef __ARCH_M32R_IOCTLS_H__
+#define __ARCH_M32R_IOCTLS_H__
+
+/* $Id$ */
+
+/* orig : i386 2.5.67 */
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS		0x5401
+#define TCSETS		0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
+#define TCSETSW		0x5403
+#define TCSETSF		0x5404
+#define TCGETA		0x5405
+#define TCSETA		0x5406
+#define TCSETAW		0x5407
+#define TCSETAF		0x5408
+#define TCSBRK		0x5409
+#define TCXONC		0x540A
+#define TCFLSH		0x540B
+#define TIOCEXCL	0x540C
+#define TIOCNXCL	0x540D
+#define TIOCSCTTY	0x540E
+#define TIOCGPGRP	0x540F
+#define TIOCSPGRP	0x5410
+#define TIOCOUTQ	0x5411
+#define TIOCSTI		0x5412
+#define TIOCGWINSZ	0x5413
+#define TIOCSWINSZ	0x5414
+#define TIOCMGET	0x5415
+#define TIOCMBIS	0x5416
+#define TIOCMBIC	0x5417
+#define TIOCMSET	0x5418
+#define TIOCGSOFTCAR	0x5419
+#define TIOCSSOFTCAR	0x541A
+#define FIONREAD	0x541B
+#define TIOCINQ		FIONREAD
+#define TIOCLINUX	0x541C
+#define TIOCCONS	0x541D
+#define TIOCGSERIAL	0x541E
+#define TIOCSSERIAL	0x541F
+#define TIOCPKT		0x5420
+#define FIONBIO		0x5421
+#define TIOCNOTTY	0x5422
+#define TIOCSETD	0x5423
+#define TIOCGETD	0x5424
+#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
+/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
+#define TIOCSBRK	0x5427  /* BSD compatibility */
+#define TIOCCBRK	0x5428  /* BSD compatibility */
+#define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+
+#define FIONCLEX	0x5450
+#define FIOCLEX		0x5451
+#define FIOASYNC	0x5452
+#define TIOCSERCONFIG	0x5453
+#define TIOCSERGWILD	0x5454
+#define TIOCSERSWILD	0x5455
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
+#define TIOCSERGETLSR   0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+#define TIOCGHAYESESP   0x545E  /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP   0x545F  /* Set Hayes ESP configuration */
+#define FIOQSIZE	0x5460
+
+/* Used for packet mode */
+#define TIOCPKT_DATA		 0
+#define TIOCPKT_FLUSHREAD	 1
+#define TIOCPKT_FLUSHWRITE	 2
+#define TIOCPKT_STOP		 4
+#define TIOCPKT_START		 8
+#define TIOCPKT_NOSTOP		16
+#define TIOCPKT_DOSTOP		32
+
+#define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
+
+#endif /* __ARCH_M32R_IOCTLS_H__ */
+
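These request codes are what userspace hands to ioctl() on tty file descriptors. A minimal hedged sketch that queries the terminal window size via TIOCGWINSZ:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == -1) {
		perror("TIOCGWINSZ");
		return 1;
	}
	printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
	return 0;
}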
diff --git a/include/asm-m32r/ipc.h b/include/asm-m32r/ipc.h
new file mode 100644
index 0000000..a46e3d9
--- /dev/null
+++ b/include/asm-m32r/ipc.h
@@ -0,0 +1 @@
+#include <asm-generic/ipc.h>
diff --git a/include/asm-m32r/ipcbuf.h b/include/asm-m32r/ipcbuf.h
new file mode 100644
index 0000000..7c77fb0
--- /dev/null
+++ b/include/asm-m32r/ipcbuf.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_M32R_IPCBUF_H
+#define _ASM_M32R_IPCBUF_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * The ipc64_perm structure for m32r architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t		key;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
+	__kernel_mode_t		mode;
+	unsigned short		__pad1;
+	unsigned short		seq;
+	unsigned short		__pad2;
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+#endif /* _ASM_M32R_IPCBUF_H */
diff --git a/include/asm-m32r/irq.h b/include/asm-m32r/irq.h
new file mode 100644
index 0000000..8ed7796
--- /dev/null
+++ b/include/asm-m32r/irq.h
@@ -0,0 +1,75 @@
+#ifdef __KERNEL__
+#ifndef _ASM_M32R_IRQ_H
+#define _ASM_M32R_IRQ_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_PLAT_M32700UT_Alpha) || defined(CONFIG_PLAT_USRV)
+/*
+ * IRQ definitions for M32700UT
+ *  M32700 Chip: 64 interrupts
+ *  ICU of M32700UT-on-board PLD: 32 interrupts cascaded to INT1# chip pin
+ */
+#define	M32700UT_NUM_CPU_IRQ	(64)
+#define M32700UT_NUM_PLD_IRQ	(32)
+#define M32700UT_IRQ_BASE	0
+#define M32700UT_CPU_IRQ_BASE	M32700UT_IRQ_BASE
+#define M32700UT_PLD_IRQ_BASE	(M32700UT_CPU_IRQ_BASE + M32700UT_NUM_CPU_IRQ)
+
+#define NR_IRQS	(M32700UT_NUM_CPU_IRQ + M32700UT_NUM_PLD_IRQ)
+#elif defined(CONFIG_PLAT_M32700UT)
+/*
+ * IRQ definitions for M32700UT(Rev.C) + M32R-LAN
+ *  M32700 Chip: 64 interrupts
+ *  ICU of M32700UT-on-board PLD: 32 interrupts cascaded to INT1# chip pin
+ *  ICU of M32R-LCD-on-board PLD: 32 interrupts cascaded to INT2# chip pin
+ *  ICU of M32R-LAN-on-board PLD: 32 interrupts cascaded to INT0# chip pin
+ */
+#define M32700UT_NUM_CPU_IRQ		(64)
+#define M32700UT_NUM_PLD_IRQ		(32)
+#define M32700UT_NUM_LCD_PLD_IRQ	(32)
+#define M32700UT_NUM_LAN_PLD_IRQ	(32)
+#define M32700UT_IRQ_BASE		0
+#define M32700UT_CPU_IRQ_BASE		(M32700UT_IRQ_BASE)
+#define M32700UT_PLD_IRQ_BASE \
+	(M32700UT_CPU_IRQ_BASE + M32700UT_NUM_CPU_IRQ)
+#define M32700UT_LCD_PLD_IRQ_BASE \
+	(M32700UT_PLD_IRQ_BASE + M32700UT_NUM_PLD_IRQ)
+#define M32700UT_LAN_PLD_IRQ_BASE \
+	(M32700UT_LCD_PLD_IRQ_BASE + M32700UT_NUM_LCD_PLD_IRQ)
+
+#define NR_IRQS \
+	(M32700UT_NUM_CPU_IRQ + M32700UT_NUM_PLD_IRQ \
+	+ M32700UT_NUM_LCD_PLD_IRQ + M32700UT_NUM_LAN_PLD_IRQ)
+#elif defined(CONFIG_PLAT_OPSPUT)
+/*
+ * IRQ definitions for OPSPUT + M32R-LAN
+ *  OPSP Chip: 64 interrupts
+ *  ICU of OPSPUT-on-board PLD: 32 interrupts cascaded to INT1# chip pin
+ *  ICU of M32R-LCD-on-board PLD: 32 interrupts cascaded to INT2# chip pin
+ *  ICU of M32R-LAN-on-board PLD: 32 interrupts cascaded to INT0# chip pin
+ */
+#define OPSPUT_NUM_CPU_IRQ		(64)
+#define OPSPUT_NUM_PLD_IRQ		(32)
+#define OPSPUT_NUM_LCD_PLD_IRQ	(32)
+#define OPSPUT_NUM_LAN_PLD_IRQ	(32)
+#define OPSPUT_IRQ_BASE		0
+#define OPSPUT_CPU_IRQ_BASE		(OPSPUT_IRQ_BASE)
+#define OPSPUT_PLD_IRQ_BASE \
+	(OPSPUT_CPU_IRQ_BASE + OPSPUT_NUM_CPU_IRQ)
+#define OPSPUT_LCD_PLD_IRQ_BASE \
+	(OPSPUT_PLD_IRQ_BASE + OPSPUT_NUM_PLD_IRQ)
+#define OPSPUT_LAN_PLD_IRQ_BASE \
+	(OPSPUT_LCD_PLD_IRQ_BASE + OPSPUT_NUM_LCD_PLD_IRQ)
+
+#define NR_IRQS \
+	(OPSPUT_NUM_CPU_IRQ + OPSPUT_NUM_PLD_IRQ \
+	+ OPSPUT_NUM_LCD_PLD_IRQ + OPSPUT_NUM_LAN_PLD_IRQ)
+#else
+#define NR_IRQS	64
+#endif
+
+#define irq_canonicalize(irq)	(irq)
+
+#endif /* _ASM_M32R_IRQ_H */
+#endif /* __KERNEL__ */
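The per-platform blocks above simply stack each cascaded PLD's lines after the 64 on-chip sources, so a board interrupt's Linux IRQ number is its block base plus the line number. A hedged sketch of the arithmetic for the CONFIG_PLAT_M32700UT layout defined above (the helper name is invented):

/* Sketch: IRQ number layout for M32700UT + LCD PLD + LAN PLD.
 *   on-chip sources : IRQs   0 ..  63
 *   M32700UT PLD    : IRQs  64 ..  95  (M32700UT_PLD_IRQ_BASE + line)
 *   LCD PLD         : IRQs  96 .. 127  (M32700UT_LCD_PLD_IRQ_BASE + line)
 *   LAN PLD         : IRQs 128 .. 159  (M32700UT_LAN_PLD_IRQ_BASE + line)
 * NR_IRQS is therefore 160.
 */
static inline unsigned int m32700ut_lan_line_to_irq(unsigned int line)
{
	return M32700UT_LAN_PLD_IRQ_BASE + line;	/* e.g. line 1 -> IRQ 129 */
}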
diff --git a/include/asm-m32r/kmap_types.h b/include/asm-m32r/kmap_types.h
new file mode 100644
index 0000000..7429591
--- /dev/null
+++ b/include/asm-m32r/kmap_types.h
@@ -0,0 +1,34 @@
+#ifndef __M32R_KMAP_TYPES_H
+#define __M32R_KMAP_TYPES_H
+
+/* Dummy header just to define km_type. */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define D(n) __KM_FENCE_##n ,
+#else
+# define D(n)
+#endif
+
+enum km_type {
+D(0)	KM_BOUNCE_READ,
+D(1)	KM_SKB_SUNRPC_DATA,
+D(2)	KM_SKB_DATA_SOFTIRQ,
+D(3)	KM_USER0,
+D(4)	KM_USER1,
+D(5)	KM_BIO_SRC_IRQ,
+D(6)	KM_BIO_DST_IRQ,
+D(7)	KM_PTE0,
+D(8)	KM_PTE1,
+D(9)	KM_IRQ0,
+D(10)	KM_IRQ1,
+D(11)	KM_SOFTIRQ0,
+D(12)	KM_SOFTIRQ1,
+D(13)	KM_TYPE_NR
+};
+
+#undef D
+
+#endif /* __M32R_KMAP_TYPES_H */
+
diff --git a/include/asm-m32r/linkage.h b/include/asm-m32r/linkage.h
new file mode 100644
index 0000000..a9fb151
--- /dev/null
+++ b/include/asm-m32r/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN		.balign 4
+#define __ALIGN_STR	".balign 4"
+
+#endif /* __ASM_LINKAGE_H */
diff --git a/include/asm-m32r/local.h b/include/asm-m32r/local.h
new file mode 100644
index 0000000..def29d0
--- /dev/null
+++ b/include/asm-m32r/local.h
@@ -0,0 +1,6 @@
+#ifndef __M32R_LOCAL_H
+#define __M32R_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* __M32R_LOCAL_H */
diff --git a/include/asm-m32r/m32102.h b/include/asm-m32r/m32102.h
new file mode 100644
index 0000000..b560340
--- /dev/null
+++ b/include/asm-m32r/m32102.h
@@ -0,0 +1,294 @@
+#ifndef _M32102_H_
+#define _M32102_H_
+
+/*
+ * Renesas M32R 32102 group
+ *
+ * Copyright (c) 2001  Hitoshi Yamamoto
+ * Copyright (c) 2003, 2004  Renesas Technology Corp.
+ */
+
+/*======================================================================*
+ * Special Function Register
+ *======================================================================*/
+#define M32R_SFR_OFFSET  (0x00E00000)  /* 0x00E00000-0x00EFFFFF 1[MB] */
+
+/*
+ * Clock and Power Management registers.
+ */
+#define M32R_CPM_OFFSET          (0x000F4000+M32R_SFR_OFFSET)
+
+#define M32R_CPM_CPUCLKCR_PORTL  (0x00+M32R_CPM_OFFSET)
+#define M32R_CPM_CLKMOD_PORTL    (0x04+M32R_CPM_OFFSET)
+#define M32R_CPM_PLLCR_PORTL     (0x08+M32R_CPM_OFFSET)
+
+/*
+ * DMA Controller registers.
+ */
+#define M32R_DMA_OFFSET		(0x000F8000+M32R_SFR_OFFSET)
+
+#define M32R_DMAEN_PORTL	(0x000+M32R_DMA_OFFSET)
+#define M32R_DMAISTS_PORTL	(0x004+M32R_DMA_OFFSET)
+#define M32R_DMAEDET_PORTL	(0x008+M32R_DMA_OFFSET)
+#define M32R_DMAASTS_PORTL	(0x00c+M32R_DMA_OFFSET)
+
+#define M32R_DMA0CR0_PORTL	(0x100+M32R_DMA_OFFSET)
+#define M32R_DMA0CR1_PORTL	(0x104+M32R_DMA_OFFSET)
+#define M32R_DMA0CSA_PORTL	(0x108+M32R_DMA_OFFSET)
+#define M32R_DMA0RSA_PORTL	(0x10c+M32R_DMA_OFFSET)
+#define M32R_DMA0CDA_PORTL	(0x110+M32R_DMA_OFFSET)
+#define M32R_DMA0RDA_PORTL	(0x114+M32R_DMA_OFFSET)
+#define M32R_DMA0CBCUT_PORTL	(0x118+M32R_DMA_OFFSET)
+#define M32R_DMA0RBCUT_PORTL	(0x11c+M32R_DMA_OFFSET)
+
+#define M32R_DMA1CR0_PORTL	(0x200+M32R_DMA_OFFSET)
+#define M32R_DMA1CR1_PORTL	(0x204+M32R_DMA_OFFSET)
+#define M32R_DMA1CSA_PORTL	(0x208+M32R_DMA_OFFSET)
+#define M32R_DMA1RSA_PORTL	(0x20c+M32R_DMA_OFFSET)
+#define M32R_DMA1CDA_PORTL	(0x210+M32R_DMA_OFFSET)
+#define M32R_DMA1RDA_PORTL	(0x214+M32R_DMA_OFFSET)
+#define M32R_DMA1CBCUT_PORTL	(0x218+M32R_DMA_OFFSET)
+#define M32R_DMA1RBCUT_PORTL	(0x21c+M32R_DMA_OFFSET)
+
+/*
+ * Multi Function Timer registers.
+ */
+#define M32R_MFT_OFFSET        (0x000FC000+M32R_SFR_OFFSET)
+
+#define M32R_MFTCR_PORTL       (0x000+M32R_MFT_OFFSET)  /* MFT control */
+#define M32R_MFTRPR_PORTL      (0x004+M32R_MFT_OFFSET)  /* MFT real port */
+
+#define M32R_MFT0_OFFSET       (0x100+M32R_MFT_OFFSET)
+#define M32R_MFT0MOD_PORTL     (0x00+M32R_MFT0_OFFSET)  /* MFT0 mode */
+#define M32R_MFT0BOS_PORTL     (0x04+M32R_MFT0_OFFSET)  /* MFT0 b-port output status */
+#define M32R_MFT0CUT_PORTL     (0x08+M32R_MFT0_OFFSET)  /* MFT0 count */
+#define M32R_MFT0RLD_PORTL     (0x0C+M32R_MFT0_OFFSET)  /* MFT0 reload */
+#define M32R_MFT0CMPRLD_PORTL  (0x10+M32R_MFT0_OFFSET)  /* MFT0 compare reload */
+
+#define M32R_MFT1_OFFSET       (0x200+M32R_MFT_OFFSET)
+#define M32R_MFT1MOD_PORTL     (0x00+M32R_MFT1_OFFSET)  /* MFT1 mode */
+#define M32R_MFT1BOS_PORTL     (0x04+M32R_MFT1_OFFSET)  /* MFT1 b-port output status */
+#define M32R_MFT1CUT_PORTL     (0x08+M32R_MFT1_OFFSET)  /* MFT1 count */
+#define M32R_MFT1RLD_PORTL     (0x0C+M32R_MFT1_OFFSET)  /* MFT1 reload */
+#define M32R_MFT1CMPRLD_PORTL  (0x10+M32R_MFT1_OFFSET)  /* MFT1 compare reload */
+
+#define M32R_MFT2_OFFSET       (0x300+M32R_MFT_OFFSET)
+#define M32R_MFT2MOD_PORTL     (0x00+M32R_MFT2_OFFSET)  /* MFT2 mode */
+#define M32R_MFT2BOS_PORTL     (0x04+M32R_MFT2_OFFSET)  /* MFT2 b-port output status */
+#define M32R_MFT2CUT_PORTL     (0x08+M32R_MFT2_OFFSET)  /* MFT2 count */
+#define M32R_MFT2RLD_PORTL     (0x0C+M32R_MFT2_OFFSET)  /* MFT2 reload */
+#define M32R_MFT2CMPRLD_PORTL  (0x10+M32R_MFT2_OFFSET)  /* MFT2 compare reload */
+
+#define M32R_MFT3_OFFSET       (0x400+M32R_MFT_OFFSET)
+#define M32R_MFT3MOD_PORTL     (0x00+M32R_MFT3_OFFSET)  /* MFT3 mode */
+#define M32R_MFT3BOS_PORTL     (0x04+M32R_MFT3_OFFSET)  /* MFT3 b-port output status */
+#define M32R_MFT3CUT_PORTL     (0x08+M32R_MFT3_OFFSET)  /* MFT3 count */
+#define M32R_MFT3RLD_PORTL     (0x0C+M32R_MFT3_OFFSET)  /* MFT3 reload */
+#define M32R_MFT3CMPRLD_PORTL  (0x10+M32R_MFT3_OFFSET)  /* MFT3 compare reload */
+
+#define M32R_MFT4_OFFSET       (0x500+M32R_MFT_OFFSET)
+#define M32R_MFT4MOD_PORTL     (0x00+M32R_MFT4_OFFSET)  /* MFT4 mode */
+#define M32R_MFT4BOS_PORTL     (0x04+M32R_MFT4_OFFSET)  /* MFT4 b-port output status */
+#define M32R_MFT4CUT_PORTL     (0x08+M32R_MFT4_OFFSET)  /* MFT4 count */
+#define M32R_MFT4RLD_PORTL     (0x0C+M32R_MFT4_OFFSET)  /* MFT4 reload */
+#define M32R_MFT4CMPRLD_PORTL  (0x10+M32R_MFT4_OFFSET)  /* MFT4 compare reload */
+
+#define M32R_MFT5_OFFSET       (0x600+M32R_MFT_OFFSET)
+#define M32R_MFT5MOD_PORTL     (0x00+M32R_MFT5_OFFSET)  /* MFT5 mode */
+#define M32R_MFT5BOS_PORTL     (0x04+M32R_MFT5_OFFSET)  /* MFT5 b-port output status */
+#define M32R_MFT5CUT_PORTL     (0x08+M32R_MFT5_OFFSET)  /* MFT5 count */
+#define M32R_MFT5RLD_PORTL     (0x0C+M32R_MFT5_OFFSET)  /* MFT5 reload */
+#define M32R_MFT5CMPRLD_PORTL  (0x10+M32R_MFT5_OFFSET)  /* MFT5 compare reload */
+
+#ifdef CONFIG_CHIP_M32700
+#define M32R_MFTCR_MFT0MSK  (1UL<<31)  /* b0 */
+#define M32R_MFTCR_MFT1MSK  (1UL<<30)  /* b1 */
+#define M32R_MFTCR_MFT2MSK  (1UL<<29)  /* b2 */
+#define M32R_MFTCR_MFT3MSK  (1UL<<28)  /* b3 */
+#define M32R_MFTCR_MFT4MSK  (1UL<<27)  /* b4 */
+#define M32R_MFTCR_MFT5MSK  (1UL<<26)  /* b5 */
+#define M32R_MFTCR_MFT0EN   (1UL<<23)  /* b8 */
+#define M32R_MFTCR_MFT1EN   (1UL<<22)  /* b9 */
+#define M32R_MFTCR_MFT2EN   (1UL<<21)  /* b10 */
+#define M32R_MFTCR_MFT3EN   (1UL<<20)  /* b11 */
+#define M32R_MFTCR_MFT4EN   (1UL<<19)  /* b12 */
+#define M32R_MFTCR_MFT5EN   (1UL<<18)  /* b13 */
+#else	/* not CONFIG_CHIP_M32700 */
+#define M32R_MFTCR_MFT0MSK  (1UL<<15)  /* b16 */
+#define M32R_MFTCR_MFT1MSK  (1UL<<14)  /* b17 */
+#define M32R_MFTCR_MFT2MSK  (1UL<<13)  /* b18 */
+#define M32R_MFTCR_MFT3MSK  (1UL<<12)  /* b19 */
+#define M32R_MFTCR_MFT4MSK  (1UL<<11)  /* b20 */
+#define M32R_MFTCR_MFT5MSK  (1UL<<10)  /* b21 */
+#define M32R_MFTCR_MFT0EN   (1UL<<7)   /* b24 */
+#define M32R_MFTCR_MFT1EN   (1UL<<6)   /* b25 */
+#define M32R_MFTCR_MFT2EN   (1UL<<5)   /* b26 */
+#define M32R_MFTCR_MFT3EN   (1UL<<4)   /* b27 */
+#define M32R_MFTCR_MFT4EN   (1UL<<3)   /* b28 */
+#define M32R_MFTCR_MFT5EN   (1UL<<2)   /* b29 */
+#endif	/* not CONFIG_CHIP_M32700 */
+
+#define M32R_MFTMOD_CC_MASK    (1UL<<15)  /* b16 */
+#define M32R_MFTMOD_TCCR       (1UL<<13)  /* b18 */
+#define M32R_MFTMOD_GTSEL000   (0UL<<8)   /* b21-23 : 000 */
+#define M32R_MFTMOD_GTSEL001   (1UL<<8)   /* b21-23 : 001 */
+#define M32R_MFTMOD_GTSEL010   (2UL<<8)   /* b21-23 : 010 */
+#define M32R_MFTMOD_GTSEL011   (3UL<<8)   /* b21-23 : 011 */
+#define M32R_MFTMOD_GTSEL110   (6UL<<8)   /* b21-23 : 110 */
+#define M32R_MFTMOD_GTSEL111   (7UL<<8)   /* b21-23 : 111 */
+#define M32R_MFTMOD_CMSEL      (1UL<<3)   /* b28 */
+#define M32R_MFTMOD_CSSEL000   (0UL<<0)   /* b29-b31 : 000 */
+#define M32R_MFTMOD_CSSEL001   (1UL<<0)   /* b29-b31 : 001 */
+#define M32R_MFTMOD_CSSEL010   (2UL<<0)   /* b29-b31 : 010 */
+#define M32R_MFTMOD_CSSEL011   (3UL<<0)   /* b29-b31 : 011 */
+#define M32R_MFTMOD_CSSEL100   (4UL<<0)   /* b29-b31 : 100 */
+#define M32R_MFTMOD_CSSEL110   (6UL<<0)   /* b29-b31 : 110 */
+
+/*
+ * Serial I/O registers.
+ */
+#define M32R_SIO_OFFSET  (0x000FD000+M32R_SFR_OFFSET)
+
+#define M32R_SIO0_CR_PORTL    (0x000+M32R_SIO_OFFSET)
+#define M32R_SIO0_MOD0_PORTL  (0x004+M32R_SIO_OFFSET)
+#define M32R_SIO0_MOD1_PORTL  (0x008+M32R_SIO_OFFSET)
+#define M32R_SIO0_STS_PORTL   (0x00C+M32R_SIO_OFFSET)
+#define M32R_SIO0_TRCR_PORTL  (0x010+M32R_SIO_OFFSET)
+#define M32R_SIO0_BAUR_PORTL  (0x014+M32R_SIO_OFFSET)
+#define M32R_SIO0_RBAUR_PORTL (0x018+M32R_SIO_OFFSET)
+#define M32R_SIO0_TXB_PORTL   (0x01C+M32R_SIO_OFFSET)
+#define M32R_SIO0_RXB_PORTL   (0x020+M32R_SIO_OFFSET)
+
+/*
+ * Interrupt Control Unit registers.
+ */
+#define M32R_ICU_OFFSET       (0x000FF000+M32R_SFR_OFFSET)
+#define M32R_ICU_ISTS_PORTL   (0x004+M32R_ICU_OFFSET)
+#define M32R_ICU_IREQ0_PORTL  (0x008+M32R_ICU_OFFSET)
+#define M32R_ICU_IREQ1_PORTL  (0x00C+M32R_ICU_OFFSET)
+#define M32R_ICU_SBICR_PORTL  (0x018+M32R_ICU_OFFSET)
+#define M32R_ICU_IMASK_PORTL  (0x01C+M32R_ICU_OFFSET)
+#define M32R_ICU_CR1_PORTL    (0x200+M32R_ICU_OFFSET)  /* INT0 */
+#define M32R_ICU_CR2_PORTL    (0x204+M32R_ICU_OFFSET)  /* INT1 */
+#define M32R_ICU_CR3_PORTL    (0x208+M32R_ICU_OFFSET)  /* INT2 */
+#define M32R_ICU_CR4_PORTL    (0x20C+M32R_ICU_OFFSET)  /* INT3 */
+#define M32R_ICU_CR5_PORTL    (0x210+M32R_ICU_OFFSET)  /* INT4 */
+#define M32R_ICU_CR6_PORTL    (0x214+M32R_ICU_OFFSET)  /* INT5 */
+#define M32R_ICU_CR7_PORTL    (0x218+M32R_ICU_OFFSET)  /* INT6 */
+#define M32R_ICU_CR16_PORTL   (0x23C+M32R_ICU_OFFSET)  /* MFT0 */
+#define M32R_ICU_CR17_PORTL   (0x240+M32R_ICU_OFFSET)  /* MFT1 */
+#define M32R_ICU_CR18_PORTL   (0x244+M32R_ICU_OFFSET)  /* MFT2 */
+#define M32R_ICU_CR19_PORTL   (0x248+M32R_ICU_OFFSET)  /* MFT3 */
+#define M32R_ICU_CR20_PORTL   (0x24C+M32R_ICU_OFFSET)  /* MFT4 */
+#define M32R_ICU_CR21_PORTL   (0x250+M32R_ICU_OFFSET)  /* MFT5 */
+#define M32R_ICU_CR32_PORTL   (0x27C+M32R_ICU_OFFSET)  /* DMA0 */
+#define M32R_ICU_CR33_PORTL   (0x280+M32R_ICU_OFFSET)  /* DMA1 */
+#define M32R_ICU_CR48_PORTL   (0x2BC+M32R_ICU_OFFSET)  /* SIO0 */
+#define M32R_ICU_CR49_PORTL   (0x2C0+M32R_ICU_OFFSET)  /* SIO0 */
+#define M32R_ICU_CR50_PORTL   (0x2C4+M32R_ICU_OFFSET)  /* SIO1 */
+#define M32R_ICU_CR51_PORTL   (0x2C8+M32R_ICU_OFFSET)  /* SIO1 */
+#define M32R_ICU_CR52_PORTL   (0x2CC+M32R_ICU_OFFSET)  /* SIO2 */
+#define M32R_ICU_CR53_PORTL   (0x2D0+M32R_ICU_OFFSET)  /* SIO2 */
+#define M32R_ICU_CR54_PORTL   (0x2D4+M32R_ICU_OFFSET)  /* SIO3 */
+#define M32R_ICU_CR55_PORTL   (0x2D8+M32R_ICU_OFFSET)  /* SIO3 */
+#define M32R_ICU_CR56_PORTL   (0x2DC+M32R_ICU_OFFSET)  /* SIO4 */
+#define M32R_ICU_CR57_PORTL   (0x2E0+M32R_ICU_OFFSET)  /* SIO4 */
+
+#ifdef CONFIG_SMP
+#define M32R_ICU_IPICR0_PORTL (0x2dc+M32R_ICU_OFFSET)  /* IPI0 */
+#define M32R_ICU_IPICR1_PORTL (0x2e0+M32R_ICU_OFFSET)  /* IPI1 */
+#define M32R_ICU_IPICR2_PORTL (0x2e4+M32R_ICU_OFFSET)  /* IPI2 */
+#define M32R_ICU_IPICR3_PORTL (0x2e8+M32R_ICU_OFFSET)  /* IPI3 */
+#define M32R_ICU_IPICR4_PORTL (0x2ec+M32R_ICU_OFFSET)  /* IPI4 */
+#define M32R_ICU_IPICR5_PORTL (0x2f0+M32R_ICU_OFFSET)  /* IPI5 */
+#define M32R_ICU_IPICR6_PORTL (0x2f4+M32R_ICU_OFFSET)  /* IPI6 */
+#define M32R_ICU_IPICR7_PORTL (0x2f8+M32R_ICU_OFFSET)  /* IPI7 */
+#endif /* CONFIG_SMP */
+
+#define M32R_ICUIMASK_IMSK0  (0UL<<16)  /* b13-b15: Disable interrupt */
+#define M32R_ICUIMASK_IMSK1  (1UL<<16)  /* b13-b15: Enable level 0 interrupt */
+#define M32R_ICUIMASK_IMSK2  (2UL<<16)  /* b13-b15: Enable level 0,1 interrupt */
+#define M32R_ICUIMASK_IMSK3  (3UL<<16)  /* b13-b15: Enable level 0-2 interrupt */
+#define M32R_ICUIMASK_IMSK4  (4UL<<16)  /* b13-b15: Enable level 0-3 interrupt */
+#define M32R_ICUIMASK_IMSK5  (5UL<<16)  /* b13-b15: Enable level 0-4 interrupt */
+#define M32R_ICUIMASK_IMSK6  (6UL<<16)  /* b13-b15: Enable level 0-5 interrupt */
+#define M32R_ICUIMASK_IMSK7  (7UL<<16)  /* b13-b15: Enable level 0-6 interrupt */
+
+#define M32R_ICUCR_IEN      (1UL<<12)  /* b19: Interrupt enable */
+#define M32R_ICUCR_IRQ      (1UL<<8)   /* b23: Interrupt request */
+#define M32R_ICUCR_ISMOD00  (0UL<<4)   /* b26-b27: Interrupt sense mode Edge HtoL */
+#define M32R_ICUCR_ISMOD01  (1UL<<4)   /* b26-b27: Interrupt sense mode Level L */
+#define M32R_ICUCR_ISMOD10  (2UL<<4)   /* b26-b27: Interrupt sense mode Edge LtoH*/
+#define M32R_ICUCR_ISMOD11  (3UL<<4)   /* b26-b27: Interrupt sense mode Level H */
+#define M32R_ICUCR_ILEVEL0  (0UL<<0)   /* b29-b31: Interrupt priority level 0 */
+#define M32R_ICUCR_ILEVEL1  (1UL<<0)   /* b29-b31: Interrupt priority level 1 */
+#define M32R_ICUCR_ILEVEL2  (2UL<<0)   /* b29-b31: Interrupt priority level 2 */
+#define M32R_ICUCR_ILEVEL3  (3UL<<0)   /* b29-b31: Interrupt priority level 3 */
+#define M32R_ICUCR_ILEVEL4  (4UL<<0)   /* b29-b31: Interrupt priority level 4 */
+#define M32R_ICUCR_ILEVEL5  (5UL<<0)   /* b29-b31: Interrupt priority level 5 */
+#define M32R_ICUCR_ILEVEL6  (6UL<<0)   /* b29-b31: Interrupt priority level 6 */
+#define M32R_ICUCR_ILEVEL7  (7UL<<0)   /* b29-b31: Disable interrupt */
+
+#define M32R_IRQ_INT0    (1)   /* INT0 */
+#define M32R_IRQ_INT1    (2)   /* INT1 */
+#define M32R_IRQ_INT2    (3)   /* INT2 */
+#define M32R_IRQ_INT3    (4)   /* INT3 */
+#define M32R_IRQ_INT4    (5)   /* INT4 */
+#define M32R_IRQ_INT5    (6)   /* INT5 */
+#define M32R_IRQ_INT6    (7)   /* INT6 */
+#define M32R_IRQ_MFT0    (16)  /* MFT0 */
+#define M32R_IRQ_MFT1    (17)  /* MFT1 */
+#define M32R_IRQ_MFT2    (18)  /* MFT2 */
+#define M32R_IRQ_MFT3    (19)  /* MFT3 */
+#define M32R_IRQ_MFT4    (20)  /* MFT4 */
+#define M32R_IRQ_MFT5    (21)  /* MFT5 */
+#define M32R_IRQ_DMA0    (32)  /* DMA0 */
+#define M32R_IRQ_DMA1    (33)  /* DMA1 */
+#define M32R_IRQ_SIO0_R  (48)  /* SIO0 receive */
+#define M32R_IRQ_SIO0_S  (49)  /* SIO0 send    */
+#define M32R_IRQ_SIO1_R  (50)  /* SIO1 receive */
+#define M32R_IRQ_SIO1_S  (51)  /* SIO1 send    */
+#define M32R_IRQ_SIO2_R  (52)  /* SIO2 receive */
+#define M32R_IRQ_SIO2_S  (53)  /* SIO2 send    */
+#define M32R_IRQ_SIO3_R  (54)  /* SIO3 receive */
+#define M32R_IRQ_SIO3_S  (55)  /* SIO3 send    */
+#define M32R_IRQ_SIO4_R  (56)  /* SIO4 receive */
+#define M32R_IRQ_SIO4_S  (57)  /* SIO4 send    */
+
+#ifdef CONFIG_SMP
+#define M32R_IRQ_IPI0    (56)
+#define M32R_IRQ_IPI1    (57)
+#define M32R_IRQ_IPI2    (58)
+#define M32R_IRQ_IPI3    (59)
+#define M32R_IRQ_IPI4    (60)
+#define M32R_IRQ_IPI5    (61)
+#define M32R_IRQ_IPI6    (62)
+#define M32R_IRQ_IPI7    (63)
+#define M32R_CPUID_PORTL (0xffffffe0)
+
+#define M32R_FPGA_TOP (0x000F0000+M32R_SFR_OFFSET)
+
+#define M32R_FPGA_NUM_OF_CPUS_PORTL (0x00+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME0_PORTL   (0x10+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME1_PORTL   (0x14+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME2_PORTL   (0x18+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME3_PORTL   (0x1c+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID0_PORTL   (0x20+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID1_PORTL   (0x24+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID2_PORTL   (0x28+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID3_PORTL   (0x2c+M32R_FPGA_TOP)
+#define M32R_FPGA_VERSION0_PORTL    (0x30+M32R_FPGA_TOP)
+#define M32R_FPGA_VERSION1_PORTL    (0x34+M32R_FPGA_TOP)
+
+#ifndef __ASSEMBLY__
+/* For NETDEV WATCHDOG */
+typedef struct {
+	unsigned long icucr;	/* ICU Control Register */
+} icu_data_t;
+
+extern icu_data_t icu_data[];
+#endif
+
+#endif /* CONFIG_SMP */
+
+#endif /* _M32102_H_ */
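The ICUCR bit-field macros above are meant to be OR'd together when programming an interrupt line. A hedged kernel-context sketch enabling INT0 as a low-level-sensitive source at priority level 3 (outl() access via <asm/io.h> and the function name are assumptions of the sketch, not something this header mandates):

/* Sketch: compose an ICU control word for INT0 and write it to CR1. */
static void icu_enable_int0(void)
{
	unsigned long cr = M32R_ICUCR_IEN	/* enable the line */
			 | M32R_ICUCR_ISMOD01	/* sense mode: level, active low */
			 | M32R_ICUCR_ILEVEL3;	/* priority level 3 */

	outl(cr, M32R_ICU_CR1_PORTL);		/* CR1 controls INT0 */
}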
diff --git a/include/asm-m32r/m32102peri.h b/include/asm-m32r/m32102peri.h
new file mode 100644
index 0000000..3c12955
--- /dev/null
+++ b/include/asm-m32r/m32102peri.h
@@ -0,0 +1,468 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000,2001 by Hiroyuki Kondo
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef	void	V;
+typedef	char	B;
+typedef	short	S;
+typedef	int		W;
+typedef	long	L;
+typedef	float	F;
+typedef	double	D;
+typedef	unsigned char	UB;
+typedef	unsigned short	US;
+typedef	unsigned int	UW;
+typedef	unsigned long	UL;
+typedef	const unsigned int	CUW;
+
+/*********************************
+
+M32102 ICU
+
+*********************************/
+#define		ICUISTS		(UW *)0xa0EFF004
+#define		ICUIREQ0	(UW *)0xa0EFF008
+#define		ICUIREQ1	(UW *)0xa0EFF00C
+
+#define		ICUSBICR	(UW *)0xa0EFF018
+#define		ICUIMASK	(UW *)0xa0EFF01C
+
+#define		ICUCR1		(UW *)0xa0EFF200	/* INT0 */
+#define		ICUCR2		(UW *)0xa0EFF204	/* INT1 */
+#define		ICUCR3		(UW *)0xa0EFF208	/* INT2 */
+#define		ICUCR4		(UW *)0xa0EFF20C	/* INT3 */
+#define		ICUCR5		(UW *)0xa0EFF210	/* INT4 */
+#define		ICUCR6		(UW *)0xa0EFF214	/* INT5 */
+#define		ICUCR7		(UW *)0xa0EFF218	/* INT6 */
+
+#define		ICUCR16		(UW *)0xa0EFF23C	/* MFT0 */
+#define		ICUCR17		(UW *)0xa0EFF240	/* MFT1 */
+#define		ICUCR18		(UW *)0xa0EFF244	/* MFT2 */
+#define		ICUCR19		(UW *)0xa0EFF248	/* MFT3 */
+#define		ICUCR20		(UW *)0xa0EFF24C	/* MFT4 */
+#define		ICUCR21		(UW *)0xa0EFF250	/* MFT5 */
+
+#define		ICUCR32		(UW *)0xa0EFF27C	/* DMA0 */
+#define		ICUCR33		(UW *)0xa0EFF280	/* DMA1 */
+
+#define		ICUCR48		(UW *)0xa0EFF2BC	/* SIO0R */
+#define		ICUCR49		(UW *)0xa0EFF2C0	/* SIO0S */
+#define		ICUCR50		(UW *)0xa0EFF2C4	/* SIO1R */
+#define		ICUCR51		(UW *)0xa0EFF2C8	/* SIO1S */
+#define		ICUCR52		(UW *)0xa0EFF2CC	/* SIO2R */
+#define		ICUCR53		(UW *)0xa0EFF2D0	/* SIO2S */
+#define		ICUCR54		(UW *)0xa0EFF2D4	/* SIO3R */
+#define		ICUCR55		(UW *)0xa0EFF2D8	/* SIO3S */
+#define		ICUCR56		(UW *)0xa0EFF2DC	/* SIO4R */
+#define		ICUCR57		(UW *)0xa0EFF2E0	/* SIO4S */
+
+/*********************************
+
+M32102 MFT
+
+*********************************/
+#define		MFTCR		(US *)0xa0EFC002
+#define		MFTRPR		(UB *)0xa0EFC006
+
+#define		MFT0MOD		(US *)0xa0EFC102
+#define		MFT0BOS		(US *)0xa0EFC106
+#define		MFT0CUT		(US *)0xa0EFC10A
+#define		MFT0RLD		(US *)0xa0EFC10E
+#define		MFT0CRLD	(US *)0xa0EFC112
+
+#define		MFT1MOD		(US *)0xa0EFC202
+#define		MFT1BOS		(US *)0xa0EFC206
+#define		MFT1CUT		(US *)0xa0EFC20A
+#define		MFT1RLD		(US *)0xa0EFC20E
+#define		MFT1CRLD	(US *)0xa0EFC212
+
+#define		MFT2MOD		(US *)0xa0EFC302
+#define		MFT2BOS		(US *)0xa0EFC306
+#define		MFT2CUT		(US *)0xa0EFC30A
+#define		MFT2RLD		(US *)0xa0EFC30E
+#define		MFT2CRLD	(US *)0xa0EFC312
+
+#define		MFT3MOD		(US *)0xa0EFC402
+#define		MFT3CUT		(US *)0xa0EFC40A
+#define		MFT3RLD		(US *)0xa0EFC40E
+#define		MFT3CRLD	(US *)0xa0EFC412
+
+#define		MFT4MOD		(US *)0xa0EFC502
+#define		MFT4CUT		(US *)0xa0EFC50A
+#define		MFT4RLD		(US *)0xa0EFC50E
+#define		MFT4CRLD	(US *)0xa0EFC512
+
+#define		MFT5MOD		(US *)0xa0EFC602
+#define		MFT5CUT		(US *)0xa0EFC60A
+#define		MFT5RLD		(US *)0xa0EFC60E
+#define		MFT5CRLD	(US *)0xa0EFC612
+
+/*********************************
+
+M32102 SIO
+
+*********************************/
+
+#define SIO0CR     (volatile int *)0xa0efd000
+#define SIO0MOD0   (volatile int *)0xa0efd004
+#define SIO0MOD1   (volatile int *)0xa0efd008
+#define SIO0STS    (volatile int *)0xa0efd00c
+#define SIO0IMASK  (volatile int *)0xa0efd010
+#define SIO0BAUR   (volatile int *)0xa0efd014
+#define SIO0RBAUR  (volatile int *)0xa0efd018
+#define SIO0TXB    (volatile int *)0xa0efd01c
+#define SIO0RXB    (volatile int *)0xa0efd020
+
+#define SIO1CR     (volatile int *)0xa0efd100
+#define SIO1MOD0   (volatile int *)0xa0efd104
+#define SIO1MOD1   (volatile int *)0xa0efd108
+#define SIO1STS    (volatile int *)0xa0efd10c
+#define SIO1IMASK  (volatile int *)0xa0efd110
+#define SIO1BAUR   (volatile int *)0xa0efd114
+#define SIO1RBAUR  (volatile int *)0xa0efd118
+#define SIO1TXB    (volatile int *)0xa0efd11c
+#define SIO1RXB    (volatile int *)0xa0efd120
+/*********************************
+
+M32102 PORT
+
+*********************************/
+#define		PIEN		(UB *)0xa0EF1003	/* input enable */
+
+#define		P0DATA		(UB *)0xa0EF1020	/* data */
+#define		P1DATA		(UB *)0xa0EF1021
+#define		P2DATA		(UB *)0xa0EF1022
+#define		P3DATA		(UB *)0xa0EF1023
+#define		P4DATA		(UB *)0xa0EF1024
+#define		P5DATA		(UB *)0xa0EF1025
+#define		P6DATA		(UB *)0xa0EF1026
+#define		P7DATA		(UB *)0xa0EF1027
+
+#define		P0DIR		(UB *)0xa0EF1040	/* direction */
+#define		P1DIR		(UB *)0xa0EF1041
+#define		P2DIR		(UB *)0xa0EF1042
+#define		P3DIR		(UB *)0xa0EF1043
+#define		P4DIR		(UB *)0xa0EF1044
+#define		P5DIR		(UB *)0xa0EF1045
+#define		P6DIR		(UB *)0xa0EF1046
+#define		P7DIR		(UB *)0xa0EF1047
+
+#define		P0MOD		(US *)0xa0EF1060	/* mode control */
+#define		P1MOD		(US *)0xa0EF1062
+#define		P2MOD		(US *)0xa0EF1064
+#define		P3MOD		(US *)0xa0EF1066
+#define		P4MOD		(US *)0xa0EF1068
+#define		P5MOD		(US *)0xa0EF106A
+#define		P6MOD		(US *)0xa0EF106C
+#define		P7MOD		(US *)0xa0EF106E
+
+#define		P0ODCR		(UB *)0xa0EF1080	/* open-drain control */
+#define		P1ODCR		(UB *)0xa0EF1081
+#define		P2ODCR		(UB *)0xa0EF1082
+#define		P3ODCR		(UB *)0xa0EF1083
+#define		P4ODCR		(UB *)0xa0EF1084
+#define		P5ODCR		(UB *)0xa0EF1085
+#define		P6ODCR		(UB *)0xa0EF1086
+#define		P7ODCR		(UB *)0xa0EF1087
+
+/*********************************
+
+M32102 Cache
+
+********************************/
+
+#define		MCCR	(US *)0xFFFFFFFE
+
+
+#else  /* __ASSEMBLY__ */
+
+;;
+;; PIO     0xa0ef1000
+;;
+
+#define PIEN          0xa0ef1000
+
+#define P0DATA        0xa0ef1020
+#define P1DATA        0xa0ef1021
+#define P2DATA        0xa0ef1022
+#define P3DATA        0xa0ef1023
+#define P4DATA        0xa0ef1024
+#define P5DATA        0xa0ef1025
+#define P6DATA        0xa0ef1026
+#define P7DATA        0xa0ef1027
+
+#define P0DIR         0xa0ef1040
+#define P1DIR         0xa0ef1041
+#define P2DIR         0xa0ef1042
+#define P3DIR         0xa0ef1043
+#define P4DIR         0xa0ef1044
+#define P5DIR         0xa0ef1045
+#define P6DIR         0xa0ef1046
+#define P7DIR         0xa0ef1047
+
+#define P0MOD         0xa0ef1060
+#define P1MOD         0xa0ef1062
+#define P2MOD         0xa0ef1064
+#define P3MOD         0xa0ef1066
+#define P4MOD         0xa0ef1068
+#define P5MOD         0xa0ef106a
+#define P6MOD         0xa0ef106c
+#define P7MOD         0xa0ef106e
+;
+#define P0ODCR        0xa0ef1080
+#define P1ODCR        0xa0ef1081
+#define P2ODCR        0xa0ef1082
+#define P3ODCR        0xa0ef1083
+#define P4ODCR        0xa0ef1084
+#define P5ODCR        0xa0ef1085
+#define P6ODCR        0xa0ef1086
+#define P7ODCR        0xa0ef1087
+
+;;
+;; WDT     0xa0ef2000
+;;
+
+#define WDTCR         0xa0ef2000
+
+
+;;
+;; CLK     0xa0ef4000
+;;
+
+#define CPUCLKCR      0xa0ef4000
+#define CLKMOD        0xa0ef4004
+#define PLLCR         0xa0ef4008
+
+
+;;
+;; BSEL    0xa0ef5000
+;;
+
+#define BSEL0CR       0xa0ef5000
+#define BSEL1CR       0xa0ef5004
+#define BSEL2CR       0xa0ef5008
+#define BSEL3CR       0xa0ef500c
+#define BSEL4CR       0xa0ef5010
+#define BSEL5CR       0xa0ef5014
+
+
+;;
+;; SDRAMC  0xa0ef6000
+;;
+
+#define SDRF0         0xa0ef6000
+#define SDRF1         0xa0ef6004
+#define SDIR0         0xa0ef6008
+#define SDIR1         0xa0ef600c
+#define SDBR          0xa0ef6010
+
+;; CH0
+#define SD0ADR        0xa0ef6020
+#define SD0SZ         0xa0ef6022
+#define SD0ER         0xa0ef6024
+#define SD0TR         0xa0ef6028
+#define SD0MOD        0xa0ef602c
+
+;; CH1
+#define SD1ADR        0xa0ef6040
+#define SD1SZ         0xa0ef6042
+#define SD1ER         0xa0ef6044
+#define SD1TR         0xa0ef6048
+#define SD1MOD        0xa0ef604c
+
+
+;;
+;; DMAC    0xa0ef8000
+;;
+
+#define DMAEN         0xa0ef8000
+#define DMAISTS       0xa0ef8004
+#define DMAEDET       0xa0ef8008
+#define DMAASTS       0xa0ef800c
+
+;; CH0
+#define DMA0CR0       0xa0ef8100
+#define DMA0CR1       0xa0ef8104
+#define DMA0CSA       0xa0ef8108
+#define DMA0RSA       0xa0ef810c
+#define DMA0CDA       0xa0ef8110
+#define DMA0RDA       0xa0ef8114
+#define DMA0CBCUT     0xa0ef8118
+#define DMA0RBCUT     0xa0ef811c
+
+;; CH1
+#define DMA1CR0       0xa0ef8200
+#define DMA1CR1       0xa0ef8204
+#define DMA1CSA       0xa0ef8208
+#define DMA1RSA       0xa0ef820c
+#define DMA1CDA       0xa0ef8210
+#define DMA1RDA       0xa0ef8214
+#define DMA1CBCUT     0xa0ef8218
+#define DMA1RBCUT     0xa0ef821c
+
+
+;;
+;; MFT     0xa0efc000
+;;
+
+#define MFTCR        0xa0efc000
+#define MFTRPR       0xa0efc004
+
+;; CH0
+#define MFT0MOD      0xa0efc100
+#define MFT0BOS      0xa0efc104
+#define MFT0CUT      0xa0efc108
+#define MFT0RLD      0xa0efc10c
+#define MFT0CMPRLD   0xa0efc110
+
+;; CH1
+#define MFT1MOD      0xa0efc200
+#define MFT1BOS      0xa0efc204
+#define MFT1CUT      0xa0efc208
+#define MFT1RLD      0xa0efc20c
+#define MFT1CMPRLD   0xa0efc210
+
+;; CH2
+#define MFT2MOD      0xa0efc300
+#define MFT2BOS      0xa0efc304
+#define MFT2CUT      0xa0efc308
+#define MFT2RLD      0xa0efc30c
+#define MFT2CMPRLD   0xa0efc310
+
+;; CH3
+#define MFT3MOD      0xa0efc400
+#define MFT3BOS      0xa0efc404
+#define MFT3CUT      0xa0efc408
+#define MFT3RLD      0xa0efc40c
+#define MFT3CMPRLD   0xa0efc410
+
+;; CH4
+#define MFT4MOD      0xa0efc500
+#define MFT4BOS      0xa0efc504
+#define MFT4CUT      0xa0efc508
+#define MFT4RLD      0xa0efc50c
+#define MFT4CMPRLD   0xa0efc510
+
+;; CH5
+#define MFT5MOD      0xa0efc600
+#define MFT5BOS      0xa0efc604
+#define MFT5CUT      0xa0efc608
+#define MFT5RLD      0xa0efc60c
+#define MFT5CMPRLD   0xa0efc610
+
+
+;;
+;; SIO     0xa0efd000
+;;
+
+;; CH0
+#define SIO0CR        0xa0efd000
+#define SIO0MOD0      0xa0efd004
+#define SIO0MOD1      0xa0efd008
+#define SIO0STS       0xa0efd00c
+#define SIO0IMASK     0xa0efd010
+#define SIO0BAUR      0xa0efd014
+#define SIO0RBAUR     0xa0efd018
+#define SIO0TXB       0xa0efd01c
+#define SIO0RXB       0xa0efd020
+
+;; CH1
+#define SIO1CR        0xa0efd100
+#define SIO1MOD0      0xa0efd104
+#define SIO1MOD1      0xa0efd108
+#define SIO1STS       0xa0efd10c
+#define SIO1IMASK     0xa0efd110
+#define SIO1BAUR      0xa0efd114
+#define SIO1RBAUR     0xa0efd118
+#define SIO1TXB       0xa0efd11c
+#define SIO1RXB       0xa0efd120
+
+;; CH2
+#define SIO2CR        0xa0efd200
+#define SIO2MOD0      0xa0efd204
+#define SIO2MOD1      0xa0efd208
+#define SIO2STS       0xa0efd20c
+#define SIO2IMASK     0xa0efd210
+#define SIO2BAUR      0xa0efd214
+#define SIO2RBAUR     0xa0efd218
+#define SIO2TXB       0xa0efd21c
+#define SIO2RXB       0xa0efd220
+
+;; CH3
+#define SIO3CR        0xa0efd300
+#define SIO3MOD0      0xa0efd304
+#define SIO3MOD1      0xa0efd308
+#define SIO3STS       0xa0efd30c
+#define SIO3IMASK     0xa0efd310
+#define SIO3BAUR      0xa0efd314
+#define SIO3RBAUR     0xa0efd318
+#define SIO3TXB       0xa0efd31c
+#define SIO3RXB       0xa0efd320
+
+;; CH4
+#define SIO4CR        0xa0efd400
+#define SIO4MOD0      0xa0efd404
+#define SIO4MOD1      0xa0efd408
+#define SIO4STS       0xa0efd40c
+#define SIO4IMASK     0xa0efd410
+#define SIO4BAUR      0xa0efd414
+#define SIO4RBAUR     0xa0efd418
+#define SIO4TXB       0xa0efd41c
+#define SIO4RXB       0xa0efd420
+
+
+;;
+;; ICU     0xa0eff000
+;;
+
+#define ICUISTS       0xa0eff004
+#define ICUIREQ0      0xa0eff008
+#define ICUIREQ1      0xa0eff00c
+
+#define ICUSBICR      0xa0eff018
+#define ICUIMASK      0xa0eff01c
+
+#define ICUCR1        0xa0eff200
+#define ICUCR2        0xa0eff204
+#define ICUCR3        0xa0eff208
+#define ICUCR4        0xa0eff20c
+#define ICUCR5        0xa0eff210
+#define ICUCR6        0xa0eff214
+#define ICUCR7        0xa0eff218
+
+#define ICUCR16       0xa0eff23c
+#define ICUCR17       0xa0eff240
+#define ICUCR18       0xa0eff244
+#define ICUCR19       0xa0eff248
+#define ICUCR20       0xa0eff24c
+#define ICUCR21       0xa0eff250
+
+#define ICUCR32       0xa0eff27c
+#define ICUCR33       0xa0eff280
+
+#define ICUCR48       0xa0eff2bc
+#define ICUCR49       0xa0eff2c0
+#define ICUCR50       0xa0eff2c4
+#define ICUCR51       0xa0eff2c8
+#define ICUCR52       0xa0eff2cc
+#define ICUCR53       0xa0eff2d0
+#define ICUCR54       0xa0eff2d4
+#define ICUCR55       0xa0eff2d8
+#define ICUCR56       0xa0eff2dc
+#define ICUCR57       0xa0eff2e0
+
+;;
+;; CACHE
+;;
+
+#define MCCR		  0xfffffffc
+
+
+#endif  /* __ASSEMBLY__ */
diff --git a/include/asm-m32r/m32700ut/m32700ut_lan.h b/include/asm-m32r/m32700ut/m32700ut_lan.h
new file mode 100644
index 0000000..50545ec
--- /dev/null
+++ b/include/asm-m32r/m32700ut/m32700ut_lan.h
@@ -0,0 +1,107 @@
+/*
+ * include/asm/m32700ut_lan.h
+ *
+ * M32700UT-LAN board
+ *
+ * Copyright (c) 2002	Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id$
+ */
+
+#ifndef _M32700UT_M32700UT_LAN_H
+#define _M32700UT_M32700UT_LAN_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define M32700UT_LAN_BASE	(0x10000000 /* + NONCACHE_OFFSET */)
+#else
+#define M32700UT_LAN_BASE	(0x10000000 + NONCACHE_OFFSET)
+#endif	/* __ASSEMBLY__ */
+
+/* ICU
+ *  ICUISTS:	status register
+ *  ICUIREQ0: 	request register
+ *  ICUIREQ1: 	request register
+ *  ICUCR3:	control register for CFIREQ# interrupt
+ *  ICUCR4:	control register for CFC Card insert interrupt
+ *  ICUCR5:	control register for CFC Card eject interrupt
+ *  ICUCR6:	control register for external interrupt
+ *  ICUCR11:	control register for MMC Card insert/eject interrupt
+ *  ICUCR13:	control register for SC error interrupt
+ *  ICUCR14:	control register for SC receive interrupt
+ *  ICUCR15:	control register for SC send interrupt
+ *  ICUCR16:	control register for SIO0 receive interrupt
+ *  ICUCR17:	control register for SIO0 send interrupt
+ */
+#define M32700UT_LAN_IRQ_LAN	(M32700UT_LAN_PLD_IRQ_BASE + 1)	/* LAN */
+#define M32700UT_LAN_IRQ_I2C	(M32700UT_LAN_PLD_IRQ_BASE + 3)	/* I2C */
+
+#define M32700UT_LAN_ICUISTS	__reg16(M32700UT_LAN_BASE + 0xc0002)
+#define M32700UT_LAN_ICUISTS_VECB_MASK	(0xf000)
+#define M32700UT_LAN_VECB(x)	((x) & M32700UT_LAN_ICUISTS_VECB_MASK)
+#define M32700UT_LAN_ICUISTS_ISN_MASK	(0x07c0)
+#define M32700UT_LAN_ICUISTS_ISN(x)	((x) & M32700UT_LAN_ICUISTS_ISN_MASK)
+#define M32700UT_LAN_ICUIREQ0	__reg16(M32700UT_LAN_BASE + 0xc0004)
+#define M32700UT_LAN_ICUCR1	__reg16(M32700UT_LAN_BASE + 0xc0010)
+#define M32700UT_LAN_ICUCR3	__reg16(M32700UT_LAN_BASE + 0xc0014)
+
+/*
+ * AR register on PLD
+ */
+#define ARVCR0		__reg32(M32700UT_LAN_BASE + 0x40000)
+#define ARVCR0_VDS		0x00080000
+#define ARVCR0_RST		0x00010000
+#define ARVCR1		__reg32(M32700UT_LAN_BASE + 0x40004)
+#define ARVCR1_QVGA		0x02000000
+#define ARVCR1_NORMAL		0x01000000
+#define ARVCR1_HIEN		0x00010000
+#define ARVHCOUNT	__reg32(M32700UT_LAN_BASE + 0x40008)
+#define ARDATA		__reg32(M32700UT_LAN_BASE + 0x40010)
+#define ARINTSEL	__reg32(M32700UT_LAN_BASE + 0x40014)
+#define ARINTSEL_INT3		0x10000000	/* CPU INT3 */
+#define ARDATA32	__reg32(M32700UT_LAN_BASE + 0x04040010)	// Block 5
+/*
+#define ARINTSEL_SEL2		0x00002000
+#define ARINTSEL_SEL3		0x00001000
+#define ARINTSEL_SEL6		0x00000200
+#define ARINTSEL_SEL7		0x00000100
+#define ARINTSEL_SEL9		0x00000040
+#define ARINTSEL_SEL10		0x00000020
+#define ARINTSEL_SEL11		0x00000010
+#define ARINTSEL_SEL12		0x00000008
+*/
+
+/*
+ * I2C register on PLD
+ */
+#define PLDI2CCR	__reg32(M32700UT_LAN_BASE + 0x40040)
+#define	PLDI2CCR_ES0		0x00000001	/* enable I2C interface */
+#define PLDI2CMOD	__reg32(M32700UT_LAN_BASE + 0x40044)
+#define PLDI2CMOD_ACKCLK	0x00000200
+#define PLDI2CMOD_DTWD		0x00000100
+#define PLDI2CMOD_10BT		0x00000004
+#define PLDI2CMOD_ATM_NORMAL	0x00000000
+#define PLDI2CMOD_ATM_AUTO	0x00000003
+#define PLDI2CACK	__reg32(M32700UT_LAN_BASE + 0x40048)
+#define PLDI2CACK_ACK		0x00000001
+#define PLDI2CFREQ	__reg32(M32700UT_LAN_BASE + 0x4004c)
+#define PLDI2CCND	__reg32(M32700UT_LAN_BASE + 0x40050)
+#define PLDI2CCND_START		0x00000001
+#define PLDI2CCND_STOP		0x00000002
+#define PLDI2CSTEN	__reg32(M32700UT_LAN_BASE + 0x40054)
+#define PLDI2CSTEN_STEN		0x00000001
+#define PLDI2CDATA	__reg32(M32700UT_LAN_BASE + 0x40060)
+#define PLDI2CSTS	__reg32(M32700UT_LAN_BASE + 0x40064)
+#define PLDI2CSTS_TRX		0x00000020
+#define PLDI2CSTS_BB		0x00000010
+#define PLDI2CSTS_NOACK		0x00000001	/* 0:ack, 1:noack */
+
+#endif	/* _M32700UT_M32700UT_LAN_H */
diff --git a/include/asm-m32r/m32700ut/m32700ut_lcd.h b/include/asm-m32r/m32700ut/m32700ut_lcd.h
new file mode 100644
index 0000000..ede6c77
--- /dev/null
+++ b/include/asm-m32r/m32700ut/m32700ut_lcd.h
@@ -0,0 +1,59 @@
+/*
+ * include/asm/m32700ut_lcd.h
+ *
+ * M32700UT-LCD board
+ *
+ * Copyright (c) 2002	Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id$
+ */
+
+#ifndef _M32700UT_M32700UT_LCD_H
+#define _M32700UT_M32700UT_LCD_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define M32700UT_LCD_BASE	(0x10000000 /* + NONCACHE_OFFSET */)
+#else
+#define M32700UT_LCD_BASE	(0x10000000 + NONCACHE_OFFSET)
+#endif	/* __ASSEMBLY__ */
+
+/*
+ * ICU
+ */
+#define M32700UT_LCD_IRQ_BAT_INT	(M32700UT_LCD_PLD_IRQ_BASE + 1)
+#define M32700UT_LCD_IRQ_USB_INT1	(M32700UT_LCD_PLD_IRQ_BASE + 2)
+#define M32700UT_LCD_IRQ_AUDT0		(M32700UT_LCD_PLD_IRQ_BASE + 3)
+#define M32700UT_LCD_IRQ_AUDT2		(M32700UT_LCD_PLD_IRQ_BASE + 4)
+#define M32700UT_LCD_IRQ_BATSIO_RCV	(M32700UT_LCD_PLD_IRQ_BASE + 16)
+#define M32700UT_LCD_IRQ_BATSIO_SND	(M32700UT_LCD_PLD_IRQ_BASE + 17)
+#define M32700UT_LCD_IRQ_ASNDSIO_RCV	(M32700UT_LCD_PLD_IRQ_BASE + 18)
+#define M32700UT_LCD_IRQ_ASNDSIO_SND	(M32700UT_LCD_PLD_IRQ_BASE + 19)
+#define M32700UT_LCD_IRQ_ACNLSIO_SND	(M32700UT_LCD_PLD_IRQ_BASE + 21)
+
+#define M32700UT_LCD_ICUISTS	__reg16(M32700UT_LCD_BASE + 0x300002)
+#define M32700UT_LCD_ICUISTS_VECB_MASK	(0xf000)
+#define M32700UT_LCD_VECB(x)	((x) & M32700UT_LCD_ICUISTS_VECB_MASK)
+#define M32700UT_LCD_ICUISTS_ISN_MASK	(0x07c0)
+#define M32700UT_LCD_ICUISTS_ISN(x)	((x) & M32700UT_LCD_ICUISTS_ISN_MASK)
+#define M32700UT_LCD_ICUIREQ0	__reg16(M32700UT_LCD_BASE + 0x300004)
+#define M32700UT_LCD_ICUIREQ1	__reg16(M32700UT_LCD_BASE + 0x300006)
+#define M32700UT_LCD_ICUCR1	__reg16(M32700UT_LCD_BASE + 0x300020)
+#define M32700UT_LCD_ICUCR2	__reg16(M32700UT_LCD_BASE + 0x300022)
+#define M32700UT_LCD_ICUCR3	__reg16(M32700UT_LCD_BASE + 0x300024)
+#define M32700UT_LCD_ICUCR4	__reg16(M32700UT_LCD_BASE + 0x300026)
+#define M32700UT_LCD_ICUCR16	__reg16(M32700UT_LCD_BASE + 0x300030)
+#define M32700UT_LCD_ICUCR17	__reg16(M32700UT_LCD_BASE + 0x300032)
+#define M32700UT_LCD_ICUCR18	__reg16(M32700UT_LCD_BASE + 0x300034)
+#define M32700UT_LCD_ICUCR19	__reg16(M32700UT_LCD_BASE + 0x300036)
+#define M32700UT_LCD_ICUCR21	__reg16(M32700UT_LCD_BASE + 0x30003a)
+
+#endif	/* _M32700UT_M32700UT_LCD_H */
diff --git a/include/asm-m32r/m32700ut/m32700ut_pld.h b/include/asm-m32r/m32700ut/m32700ut_pld.h
new file mode 100644
index 0000000..f5e4794
--- /dev/null
+++ b/include/asm-m32r/m32700ut/m32700ut_pld.h
@@ -0,0 +1,265 @@
+/*
+ * include/asm/m32700ut/m32700ut_pld.h
+ *
+ * Definitions for the Programmable Logic Device (PLD) on the M32700UT board.
+ *
+ * Copyright (c) 2002	Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id$
+ */
+
+#ifndef _M32700UT_M32700UT_PLD_H
+#define _M32700UT_M32700UT_PLD_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_PLAT_M32700UT_Alpha)
+#define PLD_PLAT_BASE		0x08c00000
+#elif defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV)
+#define PLD_PLAT_BASE		0x04c00000
+#else
+#error "no platform configuration"
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define PLD_BASE		(PLD_PLAT_BASE /* + NONCACHE_OFFSET */)
+#define __reg8			(volatile unsigned char *)
+#define __reg16			(volatile unsigned short *)
+#define __reg32			(volatile unsigned int *)
+#else
+#define PLD_BASE		(PLD_PLAT_BASE + NONCACHE_OFFSET)
+#define __reg8
+#define __reg16
+#define __reg32
+#endif	/* __ASSEMBLY__ */
+
+/* CFC */
+#define	PLD_CFRSTCR		__reg16(PLD_BASE + 0x0000)
+#define PLD_CFSTS		__reg16(PLD_BASE + 0x0002)
+#define PLD_CFIMASK		__reg16(PLD_BASE + 0x0004)
+#define PLD_CFBUFCR		__reg16(PLD_BASE + 0x0006)
+#define PLD_CFVENCR		__reg16(PLD_BASE + 0x0008)
+#define PLD_CFCR0		__reg16(PLD_BASE + 0x000a)
+#define PLD_CFCR1		__reg16(PLD_BASE + 0x000c)
+#define PLD_IDERSTCR		__reg16(PLD_BASE + 0x0010)
+
+/* MMC */
+#define PLD_MMCCR		__reg16(PLD_BASE + 0x4000)
+#define PLD_MMCMOD		__reg16(PLD_BASE + 0x4002)
+#define PLD_MMCSTS		__reg16(PLD_BASE + 0x4006)
+#define PLD_MMCBAUR		__reg16(PLD_BASE + 0x400a)
+#define PLD_MMCCMDBCUT		__reg16(PLD_BASE + 0x400c)
+#define PLD_MMCCDTBCUT		__reg16(PLD_BASE + 0x400e)
+#define PLD_MMCDET		__reg16(PLD_BASE + 0x4010)
+#define PLD_MMCWP		__reg16(PLD_BASE + 0x4012)
+#define PLD_MMCWDATA		__reg16(PLD_BASE + 0x5000)
+#define PLD_MMCRDATA		__reg16(PLD_BASE + 0x6000)
+#define PLD_MMCCMDDATA		__reg16(PLD_BASE + 0x7000)
+#define PLD_MMCRSPDATA		__reg16(PLD_BASE + 0x7006)
+
+/* ICU
+ *  ICUISTS:	status register
+ *  ICUIREQ0: 	request register
+ *  ICUIREQ1: 	request register
+ *  ICUCR3:	control register for CFIREQ# interrupt
+ *  ICUCR4:	control register for CFC Card insert interrupt
+ *  ICUCR5:	control register for CFC Card eject interrupt
+ *  ICUCR6:	control register for external interrupt
+ *  ICUCR11:	control register for MMC Card insert/eject interrupt
+ *  ICUCR13:	control register for SC error interrupt
+ *  ICUCR14:	control register for SC receive interrupt
+ *  ICUCR15:	control register for SC send interrupt
+ *  ICUCR16:	control register for SIO0 receive interrupt
+ *  ICUCR17:	control register for SIO0 send interrupt
+ */
+#if !defined(CONFIG_PLAT_USRV)
+#define PLD_IRQ_INT0		(M32700UT_PLD_IRQ_BASE + 0)	/* None */
+#define PLD_IRQ_INT1		(M32700UT_PLD_IRQ_BASE + 1)	/* reserved */
+#define PLD_IRQ_INT2		(M32700UT_PLD_IRQ_BASE + 2)	/* reserved */
+#define PLD_IRQ_CFIREQ		(M32700UT_PLD_IRQ_BASE + 3)	/* CF IREQ */
+#define PLD_IRQ_CFC_INSERT	(M32700UT_PLD_IRQ_BASE + 4)	/* CF Insert */
+#define PLD_IRQ_CFC_EJECT	(M32700UT_PLD_IRQ_BASE + 5)	/* CF Eject */
+#define PLD_IRQ_EXINT		(M32700UT_PLD_IRQ_BASE + 6)	/* EXINT */
+#define PLD_IRQ_INT7		(M32700UT_PLD_IRQ_BASE + 7)	/* reserved */
+#define PLD_IRQ_INT8		(M32700UT_PLD_IRQ_BASE + 8)	/* reserved */
+#define PLD_IRQ_INT9		(M32700UT_PLD_IRQ_BASE + 9)	/* reserved */
+#define PLD_IRQ_INT10		(M32700UT_PLD_IRQ_BASE + 10)	/* reserved */
+#define PLD_IRQ_MMCCARD		(M32700UT_PLD_IRQ_BASE + 11)	/* MMC Insert/Eject */
+#define PLD_IRQ_INT12		(M32700UT_PLD_IRQ_BASE + 12)	/* reserved */
+#define PLD_IRQ_SC_ERROR	(M32700UT_PLD_IRQ_BASE + 13)	/* SC error */
+#define PLD_IRQ_SC_RCV		(M32700UT_PLD_IRQ_BASE + 14)	/* SC receive */
+#define PLD_IRQ_SC_SND		(M32700UT_PLD_IRQ_BASE + 15)	/* SC send */
+#define PLD_IRQ_SIO0_RCV	(M32700UT_PLD_IRQ_BASE + 16)	/* SIO receive */
+#define PLD_IRQ_SIO0_SND	(M32700UT_PLD_IRQ_BASE + 17)	/* SIO send */
+#define PLD_IRQ_INT18		(M32700UT_PLD_IRQ_BASE + 18)	/* reserved */
+#define PLD_IRQ_INT19		(M32700UT_PLD_IRQ_BASE + 19)	/* reserved */
+#define PLD_IRQ_INT20		(M32700UT_PLD_IRQ_BASE + 20)	/* reserved */
+#define PLD_IRQ_INT21		(M32700UT_PLD_IRQ_BASE + 21)	/* reserved */
+#define PLD_IRQ_INT22		(M32700UT_PLD_IRQ_BASE + 22)	/* reserved */
+#define PLD_IRQ_INT23		(M32700UT_PLD_IRQ_BASE + 23)	/* reserved */
+#define PLD_IRQ_INT24		(M32700UT_PLD_IRQ_BASE + 24)	/* reserved */
+#define PLD_IRQ_INT25		(M32700UT_PLD_IRQ_BASE + 25)	/* reserved */
+#define PLD_IRQ_INT26		(M32700UT_PLD_IRQ_BASE + 26)	/* reserved */
+#define PLD_IRQ_INT27		(M32700UT_PLD_IRQ_BASE + 27)	/* reserved */
+#define PLD_IRQ_INT28		(M32700UT_PLD_IRQ_BASE + 28)	/* reserved */
+#define PLD_IRQ_INT29		(M32700UT_PLD_IRQ_BASE + 29)	/* reserved */
+#define PLD_IRQ_INT30		(M32700UT_PLD_IRQ_BASE + 30)	/* reserved */
+#define PLD_IRQ_INT31		(M32700UT_PLD_IRQ_BASE + 31)	/* reserved */
+
+#else	/* CONFIG_PLAT_USRV */
+
+#define PLD_IRQ_INT0		(M32700UT_PLD_IRQ_BASE + 0)	/* None */
+#define PLD_IRQ_INT1		(M32700UT_PLD_IRQ_BASE + 1)	/* reserved */
+#define PLD_IRQ_INT2		(M32700UT_PLD_IRQ_BASE + 2)	/* reserved */
+#define PLD_IRQ_CF0		(M32700UT_PLD_IRQ_BASE + 3)	/* CF0# */
+#define PLD_IRQ_CF1		(M32700UT_PLD_IRQ_BASE + 4)	/* CF1# */
+#define PLD_IRQ_CF2		(M32700UT_PLD_IRQ_BASE + 5)	/* CF2# */
+#define PLD_IRQ_CF3		(M32700UT_PLD_IRQ_BASE + 6)	/* CF3# */
+#define PLD_IRQ_CF4		(M32700UT_PLD_IRQ_BASE + 7)	/* CF4# */
+#define PLD_IRQ_INT8		(M32700UT_PLD_IRQ_BASE + 8)	/* reserved */
+#define PLD_IRQ_INT9		(M32700UT_PLD_IRQ_BASE + 9)	/* reserved */
+#define PLD_IRQ_INT10		(M32700UT_PLD_IRQ_BASE + 10)	/* reserved */
+#define PLD_IRQ_INT11		(M32700UT_PLD_IRQ_BASE + 11)	/* reserved */
+#define PLD_IRQ_UART0		(M32700UT_PLD_IRQ_BASE + 12)	/* UARTIRQ0 */
+#define PLD_IRQ_UART1		(M32700UT_PLD_IRQ_BASE + 13)	/* UARTIRQ1 */
+#define PLD_IRQ_INT14		(M32700UT_PLD_IRQ_BASE + 14)	/* reserved */
+#define PLD_IRQ_INT15		(M32700UT_PLD_IRQ_BASE + 15)	/* reserved */
+#define PLD_IRQ_SNDINT		(M32700UT_PLD_IRQ_BASE + 16)	/* SNDINT# */
+#define PLD_IRQ_INT17		(M32700UT_PLD_IRQ_BASE + 17)	/* reserved */
+#define PLD_IRQ_INT18		(M32700UT_PLD_IRQ_BASE + 18)	/* reserved */
+#define PLD_IRQ_INT19		(M32700UT_PLD_IRQ_BASE + 19)	/* reserved */
+#define PLD_IRQ_INT20		(M32700UT_PLD_IRQ_BASE + 20)	/* reserved */
+#define PLD_IRQ_INT21		(M32700UT_PLD_IRQ_BASE + 21)	/* reserved */
+#define PLD_IRQ_INT22		(M32700UT_PLD_IRQ_BASE + 22)	/* reserved */
+#define PLD_IRQ_INT23		(M32700UT_PLD_IRQ_BASE + 23)	/* reserved */
+#define PLD_IRQ_INT24		(M32700UT_PLD_IRQ_BASE + 24)	/* reserved */
+#define PLD_IRQ_INT25		(M32700UT_PLD_IRQ_BASE + 25)	/* reserved */
+#define PLD_IRQ_INT26		(M32700UT_PLD_IRQ_BASE + 26)	/* reserved */
+#define PLD_IRQ_INT27		(M32700UT_PLD_IRQ_BASE + 27)	/* reserved */
+#define PLD_IRQ_INT28		(M32700UT_PLD_IRQ_BASE + 28)	/* reserved */
+#define PLD_IRQ_INT29		(M32700UT_PLD_IRQ_BASE + 29)	/* reserved */
+#define PLD_IRQ_INT30		(M32700UT_PLD_IRQ_BASE + 30)	/* reserved */
+
+#endif	/* CONFIG_PLAT_USRV */
+
+#define PLD_ICUISTS		__reg16(PLD_BASE + 0x8002)
+#define PLD_ICUISTS_VECB_MASK	(0xf000)
+#define PLD_ICUISTS_VECB(x)	((x) & PLD_ICUISTS_VECB_MASK)
+#define PLD_ICUISTS_ISN_MASK	(0x07c0)
+#define PLD_ICUISTS_ISN(x)	((x) & PLD_ICUISTS_ISN_MASK)
+#define PLD_ICUIREQ0		__reg16(PLD_BASE + 0x8004)
+#define PLD_ICUIREQ1		__reg16(PLD_BASE + 0x8006)
+#define PLD_ICUCR1		__reg16(PLD_BASE + 0x8100)
+#define PLD_ICUCR2		__reg16(PLD_BASE + 0x8102)
+#define PLD_ICUCR3		__reg16(PLD_BASE + 0x8104)
+#define PLD_ICUCR4		__reg16(PLD_BASE + 0x8106)
+#define PLD_ICUCR5		__reg16(PLD_BASE + 0x8108)
+#define PLD_ICUCR6		__reg16(PLD_BASE + 0x810a)
+#define PLD_ICUCR7		__reg16(PLD_BASE + 0x810c)
+#define PLD_ICUCR8		__reg16(PLD_BASE + 0x810e)
+#define PLD_ICUCR9		__reg16(PLD_BASE + 0x8110)
+#define PLD_ICUCR10		__reg16(PLD_BASE + 0x8112)
+#define PLD_ICUCR11		__reg16(PLD_BASE + 0x8114)
+#define PLD_ICUCR12		__reg16(PLD_BASE + 0x8116)
+#define PLD_ICUCR13		__reg16(PLD_BASE + 0x8118)
+#define PLD_ICUCR14		__reg16(PLD_BASE + 0x811a)
+#define PLD_ICUCR15		__reg16(PLD_BASE + 0x811c)
+#define PLD_ICUCR16		__reg16(PLD_BASE + 0x811e)
+#define PLD_ICUCR17		__reg16(PLD_BASE + 0x8120)
+#define PLD_ICUCR_IEN		(0x1000)
+#define PLD_ICUCR_IREQ		(0x0100)
+#define PLD_ICUCR_ISMOD00	(0x0000)	/* Low edge */
+#define PLD_ICUCR_ISMOD01	(0x0010)	/* Low level */
+#define PLD_ICUCR_ISMOD02	(0x0020)	/* High edge */
+#define PLD_ICUCR_ISMOD03	(0x0030)	/* High level */
+#define PLD_ICUCR_ILEVEL0	(0x0000)
+#define PLD_ICUCR_ILEVEL1	(0x0001)
+#define PLD_ICUCR_ILEVEL2	(0x0002)
+#define PLD_ICUCR_ILEVEL3	(0x0003)
+#define PLD_ICUCR_ILEVEL4	(0x0004)
+#define PLD_ICUCR_ILEVEL5	(0x0005)
+#define PLD_ICUCR_ILEVEL6	(0x0006)
+#define PLD_ICUCR_ILEVEL7	(0x0007)
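+
+/*
+ * Minimal usage sketch (illustrative only; the sense mode and priority
+ * level chosen here are arbitrary): to enable the CFIREQ# interrupt
+ * described above, a driver would program its control register roughly as
+ *
+ *	*PLD_ICUCR3 = PLD_ICUCR_IEN | PLD_ICUCR_ISMOD01 | PLD_ICUCR_ILEVEL3;
+ *
+ * assuming the register is written directly through the __reg16 pointer.
+ */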
+
+/* Power Control of MMC and CF */
+#define PLD_CPCR		__reg16(PLD_BASE + 0x14000)
+#define PLD_CPCR_CF		0x0001
+#define PLD_CPCR_MMC		0x0002
+
+/* LED Control
+ *
+ * 1: DIP switch side
+ * 2: Reset switch side
+ */
+#define PLD_IOLEDCR		__reg16(PLD_BASE + 0x14002)
+#define PLD_IOLED_1_ON		0x001
+#define PLD_IOLED_1_OFF		0x000
+#define PLD_IOLED_2_ON		0x002
+#define PLD_IOLED_2_OFF		0x000
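+
+/*
+ * Usage sketch (illustrative only): lighting LED 1 (the DIP switch side)
+ * while leaving LED 2 dark is a single 16-bit write, assuming direct
+ * access through the __reg16 pointer:
+ *
+ *	*PLD_IOLEDCR = PLD_IOLED_1_ON | PLD_IOLED_2_OFF;
+ */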
+
+/* DIP Switch
+ *  0: Write-protect of Flash Memory (0:protected, 1:non-protected)
+ *  1: -
+ *  2: -
+ *  3: -
+ */
+#define PLD_IOSWSTS		__reg16(PLD_BASE + 0x14004)
+#define	PLD_IOSWSTS_IOSW2	0x0200
+#define	PLD_IOSWSTS_IOSW1	0x0100
+#define	PLD_IOSWSTS_IOWP0	0x0001
+
+/* CRC */
+#define PLD_CRC7DATA		__reg16(PLD_BASE + 0x18000)
+#define PLD_CRC7INDATA		__reg16(PLD_BASE + 0x18002)
+#define PLD_CRC16DATA		__reg16(PLD_BASE + 0x18004)
+#define PLD_CRC16INDATA		__reg16(PLD_BASE + 0x18006)
+#define PLD_CRC16ADATA		__reg16(PLD_BASE + 0x18008)
+#define PLD_CRC16AINDATA	__reg16(PLD_BASE + 0x1800a)
+
+/* RTC */
+#define PLD_RTCCR		__reg16(PLD_BASE + 0x1c000)
+#define PLD_RTCBAUR		__reg16(PLD_BASE + 0x1c002)
+#define PLD_RTCWRDATA		__reg16(PLD_BASE + 0x1c004)
+#define PLD_RTCRDDATA		__reg16(PLD_BASE + 0x1c006)
+#define PLD_RTCRSTODT		__reg16(PLD_BASE + 0x1c008)
+
+/* SIO0 */
+#define PLD_ESIO0CR		__reg16(PLD_BASE + 0x20000)
+#define	PLD_ESIO0CR_TXEN	0x0001
+#define	PLD_ESIO0CR_RXEN	0x0002
+#define PLD_ESIO0MOD0		__reg16(PLD_BASE + 0x20002)
+#define	PLD_ESIO0MOD0_CTSS	0x0040
+#define	PLD_ESIO0MOD0_RTSS	0x0080
+#define PLD_ESIO0MOD1		__reg16(PLD_BASE + 0x20004)
+#define	PLD_ESIO0MOD1_LMFS	0x0010
+#define PLD_ESIO0STS		__reg16(PLD_BASE + 0x20006)
+#define	PLD_ESIO0STS_TEMP	0x0001
+#define	PLD_ESIO0STS_TXCP	0x0002
+#define	PLD_ESIO0STS_RXCP	0x0004
+#define	PLD_ESIO0STS_TXSC	0x0100
+#define	PLD_ESIO0STS_RXSC	0x0200
+#define PLD_ESIO0STS_TXREADY	(PLD_ESIO0STS_TXCP | PLD_ESIO0STS_TEMP)
+#define PLD_ESIO0INTCR		__reg16(PLD_BASE + 0x20008)
+#define	PLD_ESIO0INTCR_TXIEN	0x0002
+#define	PLD_ESIO0INTCR_RXCEN	0x0004
+#define PLD_ESIO0BAUR		__reg16(PLD_BASE + 0x2000a)
+#define PLD_ESIO0TXB		__reg16(PLD_BASE + 0x2000c)
+#define PLD_ESIO0RXB		__reg16(PLD_BASE + 0x2000e)
+
+/* SIM Card */
+#define PLD_SCCR		__reg16(PLD_BASE + 0x38000)
+#define PLD_SCMOD		__reg16(PLD_BASE + 0x38004)
+#define PLD_SCSTS		__reg16(PLD_BASE + 0x38006)
+#define PLD_SCINTCR		__reg16(PLD_BASE + 0x38008)
+#define PLD_SCBAUR		__reg16(PLD_BASE + 0x3800a)
+#define PLD_SCTXB		__reg16(PLD_BASE + 0x3800c)
+#define PLD_SCRXB		__reg16(PLD_BASE + 0x3800e)
+
+#endif	/* _M32700UT_M32700UT_PLD_H */
diff --git a/include/asm-m32r/m32r.h b/include/asm-m32r/m32r.h
new file mode 100644
index 0000000..f116649
--- /dev/null
+++ b/include/asm-m32r/m32r.h
@@ -0,0 +1,134 @@
+#ifndef _ASM_M32R_M32R_H_
+#define _ASM_M32R_M32R_H_
+
+/*
+ * Renesas M32R processor
+ *
+ * Copyright (C) 2003, 2004  Renesas Technology Corp.
+ */
+
+#include <linux/config.h>
+
+/* Chip type */
+#if defined(CONFIG_CHIP_XNUX_MP) || defined(CONFIG_CHIP_XNUX2_MP)
+#include <asm/m32r_mp_fpga.h>
+#elif defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
+	|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
+        || defined(CONFIG_CHIP_OPSP)
+#include <asm/m32102.h>
+#include <asm/m32102peri.h>
+#endif
+
+/* Platform type */
+#if defined(CONFIG_PLAT_M32700UT)
+#include <asm/m32700ut/m32700ut_pld.h>
+#include <asm/m32700ut/m32700ut_lan.h>
+#include <asm/m32700ut/m32700ut_lcd.h>
+#endif  /* CONFIG_PLAT_M32700UT */
+
+#if defined(CONFIG_PLAT_OPSPUT)
+#include <asm/opsput/opsput_pld.h>
+#include <asm/opsput/opsput_lan.h>
+#include <asm/opsput/opsput_lcd.h>
+#endif  /* CONFIG_PLAT_OPSPUT */
+
+#if defined(CONFIG_PLAT_MAPPI2)
+#include <asm/mappi2/mappi2_pld.h>
+#endif	/* CONFIG_PLAT_MAPPI2 */
+
+#if defined(CONFIG_PLAT_USRV)
+#include <asm/m32700ut/m32700ut_pld.h>
+#endif
+
+/*
+ * M32R Register
+ */
+
+/*
+ * MMU Register
+ */
+
+#define MMU_REG_BASE	(0xffff0000)
+#define ITLB_BASE	(0xfe000000)
+#define DTLB_BASE	(0xfe000800)
+
+#define NR_TLB_ENTRIES	CONFIG_TLB_ENTRIES
+
+#define MATM	MMU_REG_BASE		/* MMU Address Translation Mode
+					   Register */
+#define MPSZ	(0x04 + MMU_REG_BASE)	/* MMU Page Size Designation Register */
+#define MASID	(0x08 + MMU_REG_BASE)	/* MMU Address Space ID Register */
+#define MESTS	(0x0c + MMU_REG_BASE)	/* MMU Exception Status Register */
+#define MDEVA	(0x10 + MMU_REG_BASE)	/* MMU Operand Exception Virtual
+					   Address Register */
+#define MDEVP	(0x14 + MMU_REG_BASE)	/* MMU Operand Exception Virtual Page
+					   Number Register */
+#define MPTB	(0x18 + MMU_REG_BASE)	/* MMU Page Table Base Register */
+#define MSVA	(0x20 + MMU_REG_BASE)	/* MMU Search Virtual Address
+					   Register */
+#define MTOP	(0x24 + MMU_REG_BASE)	/* MMU TLB Operation Register */
+#define MIDXI	(0x28 + MMU_REG_BASE)	/* MMU Index Register for
+					   Instruction */
+#define MIDXD	(0x2c + MMU_REG_BASE)	/* MMU Index Register for Operand */
+
+#define MATM_offset	(MATM - MMU_REG_BASE)
+#define MPSZ_offset	(MPSZ - MMU_REG_BASE)
+#define MASID_offset	(MASID - MMU_REG_BASE)
+#define MESTS_offset	(MESTS - MMU_REG_BASE)
+#define MDEVA_offset	(MDEVA - MMU_REG_BASE)
+#define MDEVP_offset	(MDEVP - MMU_REG_BASE)
+#define MPTB_offset	(MPTB - MMU_REG_BASE)
+#define MSVA_offset	(MSVA - MMU_REG_BASE)
+#define MTOP_offset	(MTOP - MMU_REG_BASE)
+#define MIDXI_offset	(MIDXI - MMU_REG_BASE)
+#define MIDXD_offset	(MIDXD - MMU_REG_BASE)
+
+#define MESTS_IT	(1 << 0)	/* Instruction TLB miss */
+#define MESTS_IA	(1 << 1)	/* Instruction Access Exception */
+#define MESTS_DT	(1 << 4)	/* Operand TLB miss */
+#define MESTS_DA	(1 << 5)	/* Operand Access Exception */
+#define MESTS_DRW	(1 << 6)	/* Operand Write Exception Flag */
+
+/*
+ * PSW (Processor Status Word)
+ */
+
+/* PSW bit */
+#define M32R_PSW_BIT_SM   (7)    /* Stack Mode */
+#define M32R_PSW_BIT_IE   (6)    /* Interrupt Enable */
+#define M32R_PSW_BIT_PM   (3)    /* Processor Mode [0:Supervisor,1:User] */
+#define M32R_PSW_BIT_C    (0)    /* Condition */
+#define M32R_PSW_BIT_BSM  (7+8)  /* Backup Stack Mode */
+#define M32R_PSW_BIT_BIE  (6+8)  /* Backup Interrupt Enable */
+#define M32R_PSW_BIT_BPM  (3+8)  /* Backup Processor Mode */
+#define M32R_PSW_BIT_BC   (0+8)  /* Backup Condition */
+
+/* PSW bit map */
+#define M32R_PSW_SM   (1UL<< M32R_PSW_BIT_SM)   /* Stack Mode */
+#define M32R_PSW_IE   (1UL<< M32R_PSW_BIT_IE)   /* Interrupt Enable */
+#define M32R_PSW_PM   (1UL<< M32R_PSW_BIT_PM)   /* Processor Mode */
+#define M32R_PSW_C    (1UL<< M32R_PSW_BIT_C)    /* Condition */
+#define M32R_PSW_BSM  (1UL<< M32R_PSW_BIT_BSM)  /* Backup Stack Mode */
+#define M32R_PSW_BIE  (1UL<< M32R_PSW_BIT_BIE)  /* Backup Interrupt Enable */
+#define M32R_PSW_BPM  (1UL<< M32R_PSW_BIT_BPM)  /* Backup Processor Mode */
+#define M32R_PSW_BC   (1UL<< M32R_PSW_BIT_BC)   /* Backup Condition */
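+
+/*
+ * Usage sketch (illustrative only): on exception entry the SM/IE/PM/C bits
+ * are saved into their backup counterparts, so a saved PSW image can be
+ * inspected with the masks above, e.g.
+ *
+ *	user = (psw & M32R_PSW_BPM) != 0;	(interrupted code was in user mode)
+ *	irqs = (psw & M32R_PSW_BIE) != 0;	(interrupts were enabled)
+ */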
+
+/*
+ * Direct address to SFR
+ */
+
+#include <asm/page.h>
+#ifdef CONFIG_MMU
+#define NONCACHE_OFFSET  __PAGE_OFFSET+0x20000000
+#else
+#define NONCACHE_OFFSET  __PAGE_OFFSET
+#endif /* CONFIG_MMU */
+
+#define M32R_ICU_ISTS_ADDR  M32R_ICU_ISTS_PORTL+NONCACHE_OFFSET
+#define M32R_ICU_IPICR_ADDR  M32R_ICU_IPICR0_PORTL+NONCACHE_OFFSET
+#define M32R_ICU_IMASK_ADDR  M32R_ICU_IMASK_PORTL+NONCACHE_OFFSET
+#define M32R_FPGA_CPU_NAME_ADDR  M32R_FPGA_CPU_NAME0_PORTL+NONCACHE_OFFSET
+#define M32R_FPGA_MODEL_ID_ADDR  M32R_FPGA_MODEL_ID0_PORTL+NONCACHE_OFFSET
+#define M32R_FPGA_VERSION_ADDR   M32R_FPGA_VERSION0_PORTL+NONCACHE_OFFSET
+
+#endif /* _ASM_M32R_M32R_H_ */
diff --git a/include/asm-m32r/m32r_mp_fpga.h b/include/asm-m32r/m32r_mp_fpga.h
new file mode 100644
index 0000000..976d2b9
--- /dev/null
+++ b/include/asm-m32r/m32r_mp_fpga.h
@@ -0,0 +1,313 @@
+#ifndef _ASM_M32R_M32R_MP_FPGA_
+#define _ASM_M32R_M32R_MP_FPGA_
+
+/*
+ * Renesas M32R-MP-FPGA
+ *
+ * Copyright (c) 2002  Hitoshi Yamamoto
+ * Copyright (c) 2003, 2004  Renesas Technology Corp.
+ */
+
+/*
+ * ========================================================
+ * M32R-MP-FPGA Memory Map
+ * ========================================================
+ * 0x00000000 : Block#0 : 64[MB]
+ *              0x03E00000 : SFR
+ *                           0x03E00000 : reserved
+ *                           0x03EF0000 : FPGA
+ *                           0x03EF1000 : reserved
+ *                           0x03EF4000 : CKM
+ *                           0x03EF5000 : BSELC
+ *                           0x03EF6000 : reserved
+ *                           0x03EFC000 : MFT
+ *                           0x03EFD000 : SIO
+ *                           0x03EFE000 : reserved
+ *                           0x03EFF000 : ICU
+ *              0x03F00000 : Internal SRAM 64[KB]
+ *              0x03F10000 : reserved
+ * --------------------------------------------------------
+ * 0x04000000 : Block#1 : 64[MB]
+ *              0x04000000 : Debug board SRAM 4[MB]
+ *              0x04400000 : reserved
+ * --------------------------------------------------------
+ * 0x08000000 : Block#2 : 64[MB]
+ * --------------------------------------------------------
+ * 0x0C000000 : Block#3 : 64[MB]
+ * --------------------------------------------------------
+ * 0x10000000 : Block#4 : 64[MB]
+ * --------------------------------------------------------
+ * 0x14000000 : Block#5 : 64[MB]
+ * --------------------------------------------------------
+ * 0x18000000 : Block#6 : 64[MB]
+ * --------------------------------------------------------
+ * 0x1C000000 : Block#7 : 64[MB]
+ * --------------------------------------------------------
+ * 0xFE000000 : TLB
+ *              0xFE000000 : ITLB
+ *              0xFE000080 : reserved
+ *              0xFE000800 : DTLB
+ *              0xFE000880 : reserved
+ * --------------------------------------------------------
+ * 0xFF000000 : System area
+ *              0xFFFF0000 : MMU
+ *              0xFFFF0030 : reserved
+ *              0xFFFF8000 : Debug function
+ *              0xFFFFA000 : reserved
+ *              0xFFFFC000 : CPU control
+ * 0xFFFFFFFF
+ * ========================================================
+ */
+
+/*======================================================================*
+ * Special Function Register
+ *======================================================================*/
+#define M32R_SFR_OFFSET  (0x00E00000)  /* 0x03E00000-0x03EFFFFF 1[MB] */
+
+/*
+ * FPGA registers.
+ */
+#define M32R_FPGA_TOP  (0x000F0000+M32R_SFR_OFFSET)
+
+#define M32R_FPGA_NUM_OF_CPUS_PORTL  (0x00+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME0_PORTL    (0x10+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME1_PORTL    (0x14+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME2_PORTL    (0x18+M32R_FPGA_TOP)
+#define M32R_FPGA_CPU_NAME3_PORTL    (0x1C+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID0_PORTL    (0x20+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID1_PORTL    (0x24+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID2_PORTL    (0x28+M32R_FPGA_TOP)
+#define M32R_FPGA_MODEL_ID3_PORTL    (0x2C+M32R_FPGA_TOP)
+#define M32R_FPGA_VERSION0_PORTL     (0x30+M32R_FPGA_TOP)
+#define M32R_FPGA_VERSION1_PORTL     (0x34+M32R_FPGA_TOP)
+
+/*
+ * Clock and Power Manager registers.
+ */
+#define M32R_CPM_OFFSET  (0x000F4000+M32R_SFR_OFFSET)
+
+#define M32R_CPM_CPUCLKCR_PORTL  (0x00+M32R_CPM_OFFSET)
+#define M32R_CPM_CLKMOD_PORTL    (0x04+M32R_CPM_OFFSET)
+#define M32R_CPM_PLLCR_PORTL     (0x08+M32R_CPM_OFFSET)
+
+/*
+ * Block SELect Controller registers.
+ */
+#define M32R_BSELC_OFFSET  (0x000F5000+M32R_SFR_OFFSET)
+
+#define M32R_BSEL0_CR0_PORTL  (0x000+M32R_BSELC_OFFSET)
+#define M32R_BSEL0_CR1_PORTL  (0x004+M32R_BSELC_OFFSET)
+#define M32R_BSEL1_CR0_PORTL  (0x100+M32R_BSELC_OFFSET)
+#define M32R_BSEL1_CR1_PORTL  (0x104+M32R_BSELC_OFFSET)
+#define M32R_BSEL2_CR0_PORTL  (0x200+M32R_BSELC_OFFSET)
+#define M32R_BSEL2_CR1_PORTL  (0x204+M32R_BSELC_OFFSET)
+#define M32R_BSEL3_CR0_PORTL  (0x300+M32R_BSELC_OFFSET)
+#define M32R_BSEL3_CR1_PORTL  (0x304+M32R_BSELC_OFFSET)
+#define M32R_BSEL4_CR0_PORTL  (0x400+M32R_BSELC_OFFSET)
+#define M32R_BSEL4_CR1_PORTL  (0x404+M32R_BSELC_OFFSET)
+#define M32R_BSEL5_CR0_PORTL  (0x500+M32R_BSELC_OFFSET)
+#define M32R_BSEL5_CR1_PORTL  (0x504+M32R_BSELC_OFFSET)
+#define M32R_BSEL6_CR0_PORTL  (0x600+M32R_BSELC_OFFSET)
+#define M32R_BSEL6_CR1_PORTL  (0x604+M32R_BSELC_OFFSET)
+#define M32R_BSEL7_CR0_PORTL  (0x700+M32R_BSELC_OFFSET)
+#define M32R_BSEL7_CR1_PORTL  (0x704+M32R_BSELC_OFFSET)
+
+/*
+ * Multi Function Timer registers.
+ */
+#define M32R_MFT_OFFSET        (0x000FC000+M32R_SFR_OFFSET)
+
+#define M32R_MFTCR_PORTL       (0x000+M32R_MFT_OFFSET)  /* MFT control */
+#define M32R_MFTRPR_PORTL      (0x004+M32R_MFT_OFFSET)  /* MFT real port */
+
+#define M32R_MFT0_OFFSET       (0x100+M32R_MFT_OFFSET)
+#define M32R_MFT0MOD_PORTL     (0x00+M32R_MFT0_OFFSET)  /* MFT0 mode */
+#define M32R_MFT0BOS_PORTL     (0x04+M32R_MFT0_OFFSET)  /* MFT0 b-port output status */
+#define M32R_MFT0CUT_PORTL     (0x08+M32R_MFT0_OFFSET)  /* MFT0 count */
+#define M32R_MFT0RLD_PORTL     (0x0C+M32R_MFT0_OFFSET)  /* MFT0 reload */
+#define M32R_MFT0CMPRLD_PORTL  (0x10+M32R_MFT0_OFFSET)  /* MFT0 compare reload */
+
+#define M32R_MFT1_OFFSET       (0x200+M32R_MFT_OFFSET)
+#define M32R_MFT1MOD_PORTL     (0x00+M32R_MFT1_OFFSET)  /* MFT1 mode */
+#define M32R_MFT1BOS_PORTL     (0x04+M32R_MFT1_OFFSET)  /* MFT1 b-port output status */
+#define M32R_MFT1CUT_PORTL     (0x08+M32R_MFT1_OFFSET)  /* MFT1 count */
+#define M32R_MFT1RLD_PORTL     (0x0C+M32R_MFT1_OFFSET)  /* MFT1 reload */
+#define M32R_MFT1CMPRLD_PORTL  (0x10+M32R_MFT1_OFFSET)  /* MFT1 compare reload */
+
+#define M32R_MFT2_OFFSET       (0x300+M32R_MFT_OFFSET)
+#define M32R_MFT2MOD_PORTL     (0x00+M32R_MFT2_OFFSET)  /* MFT2 mode */
+#define M32R_MFT2BOS_PORTL     (0x04+M32R_MFT2_OFFSET)  /* MFT2 b-port output status */
+#define M32R_MFT2CUT_PORTL     (0x08+M32R_MFT2_OFFSET)  /* MFT2 count */
+#define M32R_MFT2RLD_PORTL     (0x0C+M32R_MFT2_OFFSET)  /* MFT2 reload */
+#define M32R_MFT2CMPRLD_PORTL  (0x10+M32R_MFT2_OFFSET)  /* MFT2 compare reload */
+
+#define M32R_MFT3_OFFSET       (0x400+M32R_MFT_OFFSET)
+#define M32R_MFT3MOD_PORTL     (0x00+M32R_MFT3_OFFSET)  /* MFT3 mode */
+#define M32R_MFT3BOS_PORTL     (0x04+M32R_MFT3_OFFSET)  /* MFT3 b-port output status */
+#define M32R_MFT3CUT_PORTL     (0x08+M32R_MFT3_OFFSET)  /* MFT3 count */
+#define M32R_MFT3RLD_PORTL     (0x0C+M32R_MFT3_OFFSET)  /* MFT3 reload */
+#define M32R_MFT3CMPRLD_PORTL  (0x10+M32R_MFT3_OFFSET)  /* MFT3 compare reload */
+
+#define M32R_MFT4_OFFSET       (0x500+M32R_MFT_OFFSET)
+#define M32R_MFT4MOD_PORTL     (0x00+M32R_MFT4_OFFSET)  /* MFT4 mode */
+#define M32R_MFT4BOS_PORTL     (0x04+M32R_MFT4_OFFSET)  /* MFT4 b-port output status */
+#define M32R_MFT4CUT_PORTL     (0x08+M32R_MFT4_OFFSET)  /* MFT4 count */
+#define M32R_MFT4RLD_PORTL     (0x0C+M32R_MFT4_OFFSET)  /* MFT4 reload */
+#define M32R_MFT4CMPRLD_PORTL  (0x10+M32R_MFT4_OFFSET)  /* MFT4 compare reload */
+
+#define M32R_MFT5_OFFSET       (0x600+M32R_MFT_OFFSET)
+#define M32R_MFT5MOD_PORTL     (0x00+M32R_MFT5_OFFSET)  /* MFT5 mode */
+#define M32R_MFT5BOS_PORTL     (0x04+M32R_MFT5_OFFSET)  /* MFT5 b-port output status */
+#define M32R_MFT5CUT_PORTL     (0x08+M32R_MFT5_OFFSET)  /* MFT5 count */
+#define M32R_MFT5RLD_PORTL     (0x0C+M32R_MFT5_OFFSET)  /* MFT5 reload */
+#define M32R_MFT5CMPRLD_PORTL  (0x10+M32R_MFT5_OFFSET)  /* MFT5 compare reload */
+
+#define M32R_MFTCR_MFT0MSK  (1UL<<15)  /* b16 */
+#define M32R_MFTCR_MFT1MSK  (1UL<<14)  /* b17 */
+#define M32R_MFTCR_MFT2MSK  (1UL<<13)  /* b18 */
+#define M32R_MFTCR_MFT3MSK  (1UL<<12)  /* b19 */
+#define M32R_MFTCR_MFT4MSK  (1UL<<11)  /* b20 */
+#define M32R_MFTCR_MFT5MSK  (1UL<<10)  /* b21 */
+#define M32R_MFTCR_MFT0EN   (1UL<<7)   /* b24 */
+#define M32R_MFTCR_MFT1EN   (1UL<<6)   /* b25 */
+#define M32R_MFTCR_MFT2EN   (1UL<<5)   /* b26 */
+#define M32R_MFTCR_MFT3EN   (1UL<<4)   /* b27 */
+#define M32R_MFTCR_MFT4EN   (1UL<<3)   /* b28 */
+#define M32R_MFTCR_MFT5EN   (1UL<<2)   /* b29 */
+
+#define M32R_MFTMOD_CC_MASK    (1UL<<15)  /* b16 */
+#define M32R_MFTMOD_TCCR       (1UL<<13)  /* b18 */
+#define M32R_MFTMOD_GTSEL000   (0UL<<8)   /* b21-23 : 000 */
+#define M32R_MFTMOD_GTSEL001   (1UL<<8)   /* b21-23 : 001 */
+#define M32R_MFTMOD_GTSEL010   (2UL<<8)   /* b21-23 : 010 */
+#define M32R_MFTMOD_GTSEL011   (3UL<<8)   /* b21-23 : 011 */
+#define M32R_MFTMOD_GTSEL110   (6UL<<8)   /* b21-23 : 110 */
+#define M32R_MFTMOD_GTSEL111   (7UL<<8)   /* b21-23 : 111 */
+#define M32R_MFTMOD_CMSEL      (1UL<<3)   /* b28 */
+#define M32R_MFTMOD_CSSEL000   (0UL<<0)   /* b29-b31 : 000 */
+#define M32R_MFTMOD_CSSEL001   (1UL<<0)   /* b29-b31 : 001 */
+#define M32R_MFTMOD_CSSEL010   (2UL<<0)   /* b29-b31 : 010 */
+#define M32R_MFTMOD_CSSEL011   (3UL<<0)   /* b29-b31 : 011 */
+#define M32R_MFTMOD_CSSEL100   (4UL<<0)   /* b29-b31 : 100 */
+#define M32R_MFTMOD_CSSEL110   (6UL<<0)   /* b29-b31 : 110 */
+
+/*
+ * Serial I/O registers.
+ */
+#define M32R_SIO_OFFSET  (0x000FD000+M32R_SFR_OFFSET)
+
+#define M32R_SIO0_CR_PORTL     (0x000+M32R_SIO_OFFSET)
+#define M32R_SIO0_MOD0_PORTL   (0x004+M32R_SIO_OFFSET)
+#define M32R_SIO0_MOD1_PORTL   (0x008+M32R_SIO_OFFSET)
+#define M32R_SIO0_STS_PORTL    (0x00C+M32R_SIO_OFFSET)
+#define M32R_SIO0_TRCR_PORTL   (0x010+M32R_SIO_OFFSET)
+#define M32R_SIO0_BAUR_PORTL   (0x014+M32R_SIO_OFFSET)
+#define M32R_SIO0_RBAUR_PORTL  (0x018+M32R_SIO_OFFSET)
+#define M32R_SIO0_TXB_PORTL    (0x01C+M32R_SIO_OFFSET)
+#define M32R_SIO0_RXB_PORTL    (0x020+M32R_SIO_OFFSET)
+
+/*
+ * Interrupt Control Unit registers.
+ */
+#define M32R_ICU_OFFSET  (0x000FF000+M32R_SFR_OFFSET)
+
+#define M32R_ICU_ISTS_PORTL     (0x004+M32R_ICU_OFFSET)
+#define M32R_ICU_IREQ0_PORTL    (0x008+M32R_ICU_OFFSET)
+#define M32R_ICU_IREQ1_PORTL    (0x00C+M32R_ICU_OFFSET)
+#define M32R_ICU_SBICR_PORTL    (0x018+M32R_ICU_OFFSET)
+#define M32R_ICU_IMASK_PORTL    (0x01C+M32R_ICU_OFFSET)
+#define M32R_ICU_CR1_PORTL      (0x200+M32R_ICU_OFFSET)  /* INT0 */
+#define M32R_ICU_CR2_PORTL      (0x204+M32R_ICU_OFFSET)  /* INT1 */
+#define M32R_ICU_CR3_PORTL      (0x208+M32R_ICU_OFFSET)  /* INT2 */
+#define M32R_ICU_CR4_PORTL      (0x20C+M32R_ICU_OFFSET)  /* INT3 */
+#define M32R_ICU_CR5_PORTL      (0x210+M32R_ICU_OFFSET)  /* INT4 */
+#define M32R_ICU_CR6_PORTL      (0x214+M32R_ICU_OFFSET)  /* INT5 */
+#define M32R_ICU_CR7_PORTL      (0x218+M32R_ICU_OFFSET)  /* INT6 */
+#define M32R_ICU_CR8_PORTL      (0x21C+M32R_ICU_OFFSET)  /* INT7 */
+#define M32R_ICU_CR32_PORTL     (0x27C+M32R_ICU_OFFSET)  /* SIO0 RX */
+#define M32R_ICU_CR33_PORTL     (0x280+M32R_ICU_OFFSET)  /* SIO0 TX */
+#define M32R_ICU_CR40_PORTL     (0x29C+M32R_ICU_OFFSET)  /* DMAC0 */
+#define M32R_ICU_CR41_PORTL     (0x2A0+M32R_ICU_OFFSET)  /* DMAC1 */
+#define M32R_ICU_CR48_PORTL     (0x2BC+M32R_ICU_OFFSET)  /* MFT0 */
+#define M32R_ICU_CR49_PORTL     (0x2C0+M32R_ICU_OFFSET)  /* MFT1 */
+#define M32R_ICU_CR50_PORTL     (0x2C4+M32R_ICU_OFFSET)  /* MFT2 */
+#define M32R_ICU_CR51_PORTL     (0x2C8+M32R_ICU_OFFSET)  /* MFT3 */
+#define M32R_ICU_CR52_PORTL     (0x2CC+M32R_ICU_OFFSET)  /* MFT4 */
+#define M32R_ICU_CR53_PORTL     (0x2D0+M32R_ICU_OFFSET)  /* MFT5 */
+#define M32R_ICU_IPICR0_PORTL   (0x2DC+M32R_ICU_OFFSET)  /* IPI0 */
+#define M32R_ICU_IPICR1_PORTL   (0x2E0+M32R_ICU_OFFSET)  /* IPI1 */
+#define M32R_ICU_IPICR2_PORTL   (0x2E4+M32R_ICU_OFFSET)  /* IPI2 */
+#define M32R_ICU_IPICR3_PORTL   (0x2E8+M32R_ICU_OFFSET)  /* IPI3 */
+#define M32R_ICU_IPICR4_PORTL   (0x2EC+M32R_ICU_OFFSET)  /* IPI4 */
+#define M32R_ICU_IPICR5_PORTL   (0x2F0+M32R_ICU_OFFSET)  /* IPI5 */
+#define M32R_ICU_IPICR6_PORTL   (0x2F4+M32R_ICU_OFFSET)  /* IPI6 */
+#define M32R_ICU_IPICR7_PORTL   (0x2FC+M32R_ICU_OFFSET)  /* IPI7 */
+
+#define M32R_ICUISTS_VECB(val)  ((val>>28) & 0xF)
+#define M32R_ICUISTS_ISN(val)   ((val>>22) & 0x3F)
+#define M32R_ICUISTS_PIML(val)  ((val>>16) & 0x7)
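+
+/*
+ * Usage sketch (illustrative only): once a 32-bit value has been read from
+ * the interrupt status register, the extractors above pull the fields apart:
+ *
+ *	isn  = M32R_ICUISTS_ISN(val);	(interrupt source number)
+ *	piml = M32R_ICUISTS_PIML(val);	(accepted priority interrupt level)
+ */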
+
+#define M32R_ICUIMASK_IMSK0  (0UL<<16)  /* b13-b15: Disable interrupt */
+#define M32R_ICUIMASK_IMSK1  (1UL<<16)  /* b13-b15: Enable level 0 interrupt */
+#define M32R_ICUIMASK_IMSK2  (2UL<<16)  /* b13-b15: Enable level 0,1 interrupt */
+#define M32R_ICUIMASK_IMSK3  (3UL<<16)  /* b13-b15: Enable level 0-2 interrupt */
+#define M32R_ICUIMASK_IMSK4  (4UL<<16)  /* b13-b15: Enable level 0-3 interrupt */
+#define M32R_ICUIMASK_IMSK5  (5UL<<16)  /* b13-b15: Enable level 0-4 interrupt */
+#define M32R_ICUIMASK_IMSK6  (6UL<<16)  /* b13-b15: Enable level 0-5 interrupt */
+#define M32R_ICUIMASK_IMSK7  (7UL<<16)  /* b13-b15: Enable level 0-6 interrupt */
+
+#define M32R_ICUCR_IEN      (1UL<<12)  /* b19: Interrupt enable */
+#define M32R_ICUCR_IRQ      (1UL<<8)   /* b23: Interrupt request */
+#define M32R_ICUCR_ISMOD00  (0UL<<4)   /* b26-b27: Interrupt sense mode Edge HtoL */
+#define M32R_ICUCR_ISMOD01  (1UL<<4)   /* b26-b27: Interrupt sense mode Level L */
+#define M32R_ICUCR_ISMOD10  (2UL<<4)   /* b26-b27: Interrupt sense mode Edge LtoH*/
+#define M32R_ICUCR_ISMOD11  (3UL<<4)   /* b26-b27: Interrupt sense mode Level H */
+#define M32R_ICUCR_ILEVEL0  (0UL<<0)   /* b29-b31: Interrupt priority level 0 */
+#define M32R_ICUCR_ILEVEL1  (1UL<<0)   /* b29-b31: Interrupt priority level 1 */
+#define M32R_ICUCR_ILEVEL2  (2UL<<0)   /* b29-b31: Interrupt priority level 2 */
+#define M32R_ICUCR_ILEVEL3  (3UL<<0)   /* b29-b31: Interrupt priority level 3 */
+#define M32R_ICUCR_ILEVEL4  (4UL<<0)   /* b29-b31: Interrupt priority level 4 */
+#define M32R_ICUCR_ILEVEL5  (5UL<<0)   /* b29-b31: Interrupt priority level 5 */
+#define M32R_ICUCR_ILEVEL6  (6UL<<0)   /* b29-b31: Interrupt priority level 6 */
+#define M32R_ICUCR_ILEVEL7  (7UL<<0)   /* b29-b31: Disable interrupt */
+#define M32R_ICUCR_ILEVEL_MASK  (7UL)
+
+#define M32R_IRQ_INT0    (1)   /* INT0 */
+#define M32R_IRQ_INT1    (2)   /* INT1 */
+#define M32R_IRQ_INT2    (3)   /* INT2 */
+#define M32R_IRQ_INT3    (4)   /* INT3 */
+#define M32R_IRQ_INT4    (5)   /* INT4 */
+#define M32R_IRQ_INT5    (6)   /* INT5 */
+#define M32R_IRQ_INT6    (7)   /* INT6 */
+#define M32R_IRQ_INT7    (8)   /* INT7 */
+#define M32R_IRQ_MFT0    (16)  /* MFT0 */
+#define M32R_IRQ_MFT1    (17)  /* MFT1 */
+#define M32R_IRQ_MFT2    (18)  /* MFT2 */
+#define M32R_IRQ_MFT3    (19)  /* MFT3 */
+#define M32R_IRQ_MFT4    (20)  /* MFT4 */
+#define M32R_IRQ_MFT5    (21)  /* MFT5 */
+#define M32R_IRQ_DMAC0   (32)  /* DMAC0 */
+#define M32R_IRQ_DMAC1   (33)  /* DMAC1 */
+#define M32R_IRQ_SIO0_R  (48)  /* SIO0 receive */
+#define M32R_IRQ_SIO0_S  (49)  /* SIO0 send    */
+#define M32R_IRQ_SIO1_R  (50)  /* SIO1 receive */
+#define M32R_IRQ_SIO1_S  (51)  /* SIO1 send    */
+#define M32R_IRQ_IPI0    (56)  /* IPI0 */
+#define M32R_IRQ_IPI1    (57)  /* IPI1 */
+#define M32R_IRQ_IPI2    (58)  /* IPI2 */
+#define M32R_IRQ_IPI3    (59)  /* IPI3 */
+#define M32R_IRQ_IPI4    (60)  /* IPI4 */
+#define M32R_IRQ_IPI5    (61)  /* IPI5 */
+#define M32R_IRQ_IPI6    (62)  /* IPI6 */
+#define M32R_IRQ_IPI7    (63)  /* IPI7 */
+
+/*======================================================================*
+ * CPU
+ *======================================================================*/
+
+#define M32R_CPUID_PORTL   (0xFFFFFFE0)
+#define M32R_MCICAR_PORTL  (0xFFFFFFF0)
+#define M32R_MCDCAR_PORTL  (0xFFFFFFF4)
+#define M32R_MCCR_PORTL    (0xFFFFFFFC)
+
+#endif  /* _ASM_M32R_M32R_MP_FPGA_ */
diff --git a/include/asm-m32r/mappi2/mappi2_pld.h b/include/asm-m32r/mappi2/mappi2_pld.h
new file mode 100644
index 0000000..01dcdd1
--- /dev/null
+++ b/include/asm-m32r/mappi2/mappi2_pld.h
@@ -0,0 +1,151 @@
+/*
+ * include/asm/mappi2/mappi2_pld.h
+ *
+ * Definitions for the Extended IO Logic on the MAPPI2 board,
+ * based on m32700ut_pld.h.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ */
+
+#ifndef _MAPPI2_PLD_H
+#define _MAPPI2_PLD_H
+
+#ifndef __ASSEMBLY__
+/* FIXME:
+ * Some C functions use the non-cache address, so a non-cache PLD_BASE
+ * cannot be defined here.
+ */
+#define PLD_BASE		(0x10c00000 /* + NONCACHE_OFFSET */)
+#define __reg8			(volatile unsigned char *)
+#define __reg16			(volatile unsigned short *)
+#define __reg32			(volatile unsigned int *)
+#else
+#define PLD_BASE		(0x10c00000 + NONCACHE_OFFSET)
+#define __reg8
+#define __reg16
+#define __reg32
+#endif	/* __ASSEMBLY__ */
+
+/* CFC */
+#define	PLD_CFRSTCR		__reg16(PLD_BASE + 0x0000)
+#define PLD_CFSTS		__reg16(PLD_BASE + 0x0002)
+#define PLD_CFIMASK		__reg16(PLD_BASE + 0x0004)
+#define PLD_CFBUFCR		__reg16(PLD_BASE + 0x0006)
+#define PLD_CFCR0		__reg16(PLD_BASE + 0x000a)
+#define PLD_CFCR1		__reg16(PLD_BASE + 0x000c)
+
+/* MMC */
+#define PLD_MMCCR		__reg16(PLD_BASE + 0x4000)
+#define PLD_MMCMOD		__reg16(PLD_BASE + 0x4002)
+#define PLD_MMCSTS		__reg16(PLD_BASE + 0x4006)
+#define PLD_MMCBAUR		__reg16(PLD_BASE + 0x400a)
+#define PLD_MMCCMDBCUT		__reg16(PLD_BASE + 0x400c)
+#define PLD_MMCCDTBCUT		__reg16(PLD_BASE + 0x400e)
+#define PLD_MMCDET		__reg16(PLD_BASE + 0x4010)
+#define PLD_MMCWP		__reg16(PLD_BASE + 0x4012)
+#define PLD_MMCWDATA		__reg16(PLD_BASE + 0x5000)
+#define PLD_MMCRDATA		__reg16(PLD_BASE + 0x6000)
+#define PLD_MMCCMDDATA		__reg16(PLD_BASE + 0x7000)
+#define PLD_MMCRSPDATA		__reg16(PLD_BASE + 0x7006)
+
+/* Power Control of MMC and CF */
+#define PLD_CPCR		__reg16(PLD_BASE + 0x14000)
+
+
+/*==== ICU ====*/
+#define  M32R_IRQ_PC104        (5)   /* INT4(PC/104) */
+#define  M32R_IRQ_I2C          (28)  /* I2C-BUS     */
+#if 1
+#define  PLD_IRQ_CFIREQ       (40)  /* CFC Card Interrupt */
+#define  PLD_IRQ_CFC_INSERT   (41)  /* CFC Card Insert */
+#define  PLD_IRQ_CFC_EJECT    (42)  /* CFC Card Eject */
+#define  PLD_IRQ_MMCCARD      (43)  /* MMC Card Insert */
+#define  PLD_IRQ_MMCIRQ       (44)  /* MMC Transfer Done */
+#else
+#define  PLD_IRQ_CFIREQ       (34)  /* CFC Card Interrupt */
+#define  PLD_IRQ_CFC_INSERT   (35)  /* CFC Card Insert */
+#define  PLD_IRQ_CFC_EJECT    (36)  /* CFC Card Eject */
+#define  PLD_IRQ_MMCCARD      (37)  /* MMC Card Insert */
+#define  PLD_IRQ_MMCIRQ       (38)  /* MMC Transfer Done */
+#endif
+
+
+#if 0
+/* LED Control
+ *
+ * 1: DIP switch side
+ * 2: Reset switch side
+ */
+#define PLD_IOLEDCR		__reg16(PLD_BASE + 0x14002)
+#define PLD_IOLED_1_ON		0x001
+#define PLD_IOLED_1_OFF		0x000
+#define PLD_IOLED_2_ON		0x002
+#define PLD_IOLED_2_OFF		0x000
+
+/* DIP Switch
+ *  0: Write-protect of Flash Memory (0:protected, 1:non-protected)
+ *  1: -
+ *  2: -
+ *  3: -
+ */
+#define PLD_IOSWSTS		__reg16(PLD_BASE + 0x14004)
+#define	PLD_IOSWSTS_IOSW2	0x0200
+#define	PLD_IOSWSTS_IOSW1	0x0100
+#define	PLD_IOSWSTS_IOWP0	0x0001
+
+#endif
+
+/* CRC */
+#define PLD_CRC7DATA		__reg16(PLD_BASE + 0x18000)
+#define PLD_CRC7INDATA		__reg16(PLD_BASE + 0x18002)
+#define PLD_CRC16DATA		__reg16(PLD_BASE + 0x18004)
+#define PLD_CRC16INDATA		__reg16(PLD_BASE + 0x18006)
+#define PLD_CRC16ADATA		__reg16(PLD_BASE + 0x18008)
+#define PLD_CRC16AINDATA	__reg16(PLD_BASE + 0x1800a)
+
+
+#if 0
+/* RTC */
+#define PLD_RTCCR		__reg16(PLD_BASE + 0x1c000)
+#define PLD_RTCBAUR		__reg16(PLD_BASE + 0x1c002)
+#define PLD_RTCWRDATA		__reg16(PLD_BASE + 0x1c004)
+#define PLD_RTCRDDATA		__reg16(PLD_BASE + 0x1c006)
+#define PLD_RTCRSTODT		__reg16(PLD_BASE + 0x1c008)
+
+/* SIO0 */
+#define PLD_ESIO0CR		__reg16(PLD_BASE + 0x20000)
+#define	PLD_ESIO0CR_TXEN	0x0001
+#define	PLD_ESIO0CR_RXEN	0x0002
+#define PLD_ESIO0MOD0		__reg16(PLD_BASE + 0x20002)
+#define	PLD_ESIO0MOD0_CTSS	0x0040
+#define	PLD_ESIO0MOD0_RTSS	0x0080
+#define PLD_ESIO0MOD1		__reg16(PLD_BASE + 0x20004)
+#define	PLD_ESIO0MOD1_LMFS	0x0010
+#define PLD_ESIO0STS		__reg16(PLD_BASE + 0x20006)
+#define	PLD_ESIO0STS_TEMP	0x0001
+#define	PLD_ESIO0STS_TXCP	0x0002
+#define	PLD_ESIO0STS_RXCP	0x0004
+#define	PLD_ESIO0STS_TXSC	0x0100
+#define	PLD_ESIO0STS_RXSC	0x0200
+#define PLD_ESIO0STS_TXREADY	(PLD_ESIO0STS_TXCP | PLD_ESIO0STS_TEMP)
+#define PLD_ESIO0INTCR		__reg16(PLD_BASE + 0x20008)
+#define	PLD_ESIO0INTCR_TXIEN	0x0002
+#define	PLD_ESIO0INTCR_RXCEN	0x0004
+#define PLD_ESIO0BAUR		__reg16(PLD_BASE + 0x2000a)
+#define PLD_ESIO0TXB		__reg16(PLD_BASE + 0x2000c)
+#define PLD_ESIO0RXB		__reg16(PLD_BASE + 0x2000e)
+
+/* SIM Card */
+#define PLD_SCCR		__reg16(PLD_BASE + 0x38000)
+#define PLD_SCMOD		__reg16(PLD_BASE + 0x38004)
+#define PLD_SCSTS		__reg16(PLD_BASE + 0x38006)
+#define PLD_SCINTCR		__reg16(PLD_BASE + 0x38008)
+#define PLD_SCBAUR		__reg16(PLD_BASE + 0x3800a)
+#define PLD_SCTXB		__reg16(PLD_BASE + 0x3800c)
+#define PLD_SCRXB		__reg16(PLD_BASE + 0x3800e)
+
+#endif
+
+#endif	/* _MAPPI2_PLD_H */
diff --git a/include/asm-m32r/mc146818rtc.h b/include/asm-m32r/mc146818rtc.h
new file mode 100644
index 0000000..755601d
--- /dev/null
+++ b/include/asm-m32r/mc146818rtc.h
@@ -0,0 +1,32 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+// #define RTC_PORT(x)	(0x70 + (x))
+#define RTC_PORT(x)	((x))
+#define RTC_ALWAYS_BCD	1	/* RTC register values are always in BCD */
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
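+
+/*
+ * Usage sketch (illustrative only), assuming the usual MC146818 register
+ * map in which index 0 is the seconds register:
+ *
+ *	unsigned char sec = CMOS_READ(0);
+ *
+ * i.e. the index is written to RTC_PORT(0) and the value is read back from
+ * RTC_PORT(1).
+ */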
+
+#define RTC_IRQ 8
+#if 0
+#endif
+
+#endif /* _ASM_MC146818RTC_H */
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h
new file mode 100644
index 0000000..011f6d9
--- /dev/null
+++ b/include/asm-m32r/mman.h
@@ -0,0 +1,45 @@
+#ifndef __M32R_MMAN_H__
+#define __M32R_MMAN_H__
+
+/* orig : i386 2.6.0-test6 */
+
+#define PROT_READ	0x1		/* page can be read */
+#define PROT_WRITE	0x2		/* page can be written */
+#define PROT_EXEC	0x4		/* page can be executed */
+#define PROT_SEM	0x8		/* page may be used for atomic ops */
+#define PROT_NONE	0x0		/* page can not be accessed */
+#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_TYPE	0x0f		/* Mask for type of mapping */
+#define MAP_FIXED	0x10		/* Interpret addr exactly */
+#define MAP_ANONYMOUS	0x20		/* don't use a file */
+
+#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
+#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
+#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
+#define MAP_LOCKED	0x2000		/* pages are locked */
+#define MAP_NORESERVE	0x4000		/* don't check for reservations */
+#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
+#define MAP_NONBLOCK	0x10000		/* do not block on IO */
+
+#define MS_ASYNC	1		/* sync memory asynchronously */
+#define MS_INVALIDATE	2		/* invalidate the caches */
+#define MS_SYNC		4		/* synchronous memory sync */
+
+#define MCL_CURRENT	1		/* lock all current mappings */
+#define MCL_FUTURE	2		/* lock all future mappings */
+
+#define MADV_NORMAL	0x0		/* default page-in behavior */
+#define MADV_RANDOM	0x1		/* page-in minimum required */
+#define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
+#define MADV_WILLNEED	0x3		/* pre-fault pages */
+#define MADV_DONTNEED	0x4		/* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON	MAP_ANONYMOUS
+#define MAP_FILE	0
+
+#endif /* __M32R_MMAN_H__ */
diff --git a/include/asm-m32r/mmu.h b/include/asm-m32r/mmu.h
new file mode 100644
index 0000000..9c00eb7
--- /dev/null
+++ b/include/asm-m32r/mmu.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_M32R_MMU_H
+#define _ASM_M32R_MMU_H
+
+#include <linux/config.h>
+
+#if !defined(CONFIG_MMU)
+typedef struct {
+	struct vm_list_struct	*vmlist;
+	unsigned long		end_brk;
+} mm_context_t;
+#else
+
+/* Default "unsigned long" context */
+#ifndef CONFIG_SMP
+typedef unsigned long mm_context_t;
+#else
+typedef unsigned long mm_context_t[NR_CPUS];
+#endif
+
+#endif  /* CONFIG_MMU */
+#endif  /* _ASM_M32R_MMU_H */
diff --git a/include/asm-m32r/mmu_context.h b/include/asm-m32r/mmu_context.h
new file mode 100644
index 0000000..3634c53
--- /dev/null
+++ b/include/asm-m32r/mmu_context.h
@@ -0,0 +1,170 @@
+#ifndef _ASM_M32R_MMU_CONTEXT_H
+#define _ASM_M32R_MMU_CONTEXT_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#include <asm/m32r.h>
+
+#define MMU_CONTEXT_ASID_MASK      (0x000000FF)
+#define MMU_CONTEXT_VERSION_MASK   (0xFFFFFF00)
+#define MMU_CONTEXT_FIRST_VERSION  (0x00000100)
+#define NO_CONTEXT                 (0x00000000)
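+
+/*
+ * A context value concatenates a version number (upper 24 bits) and an
+ * ASID (lower 8 bits); e.g. 0x00000305 carries version 0x00000300 and
+ * ASID 0x05.  Version 0 is never handed out, so NO_CONTEXT can always be
+ * told apart from a live context.
+ */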
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Cache of MMU context last used.
+ */
+#ifndef CONFIG_SMP
+extern unsigned long mmu_context_cache_dat;
+#define mmu_context_cache	mmu_context_cache_dat
+#define mm_context(mm)		mm->context
+#else /* not CONFIG_SMP */
+extern unsigned long mmu_context_cache_dat[];
+#define mmu_context_cache	mmu_context_cache_dat[smp_processor_id()]
+#define mm_context(mm)		mm->context[smp_processor_id()]
+#endif /* not CONFIG_SMP */
+
+#define set_tlb_tag(entry, tag)		(*entry = (tag & PAGE_MASK)|get_asid())
+#define set_tlb_data(entry, data)	(*entry = (data | _PAGE_PRESENT))
+
+#ifdef CONFIG_MMU
+#define enter_lazy_tlb(mm, tsk)	do { } while (0)
+
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+	unsigned long mc = ++mmu_context_cache;
+
+	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+		/* We have exhausted the ASIDs of this version.
+		   Flush all TLBs and start a new cycle. */
+		local_flush_tlb_all();
+		/* Fix the version if needed.
+		   Note that we avoid version #0 to distinguish NO_CONTEXT. */
+		if (!mc)
+			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+	}
+	mm_context(mm) = mc;
+}
+
+/*
+ * Get MMU context if needed.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	if (mm) {
+		unsigned long mc = mmu_context_cache;
+
+		/* Check whether this mm still holds a context of an old version.
+		   If so, get a new context with the current version. */
+		if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK)
+			get_new_mmu_context(mm);
+	}
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int init_new_context(struct task_struct *tsk,
+	struct mm_struct *mm)
+{
+#ifndef CONFIG_SMP
+	mm->context = NO_CONTEXT;
+#else /* CONFIG_SMP */
+	int num_cpus = num_online_cpus();
+	int i;
+
+	for (i = 0 ; i < num_cpus ; i++)
+		mm->context[i] = NO_CONTEXT;
+#endif /* CONFIG_SMP */
+
+	return 0;
+}
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+#define destroy_context(mm)	do { } while (0)
+
+static inline void set_asid(unsigned long asid)
+{
+	*(volatile unsigned long *)MASID = (asid & MMU_CONTEXT_ASID_MASK);
+}
+
+static inline unsigned long get_asid(void)
+{
+	unsigned long asid;
+
+	asid = *(volatile long *)MASID;
+	asid &= MMU_CONTEXT_ASID_MASK;
+
+	return asid;
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_context(struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+	struct mm_struct *next, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	int cpu = smp_processor_id();
+#endif	/* CONFIG_SMP */
+
+	if (prev != next) {
+#ifdef CONFIG_SMP
+		cpu_set(cpu, next->cpu_vm_mask);
+#endif /* CONFIG_SMP */
+		/* Set MPTB = next->pgd */
+		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
+		activate_context(next);
+	}
+#ifdef CONFIG_SMP
+	else
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+			activate_context(next);
+#endif /* CONFIG_SMP */
+}
+
+#define deactivate_mm(tsk, mm)	do { } while (0)
+
+#define activate_mm(prev, next)	\
+	switch_mm((prev), (next), NULL)
+
+#else
+#define get_mmu_context(mm)             do { } while (0)
+#define init_new_context(tsk,mm)        (0)
+#define destroy_context(mm)             do { } while (0)
+#define set_asid(asid)                  do { } while (0)
+#define get_asid()                      (0)
+#define activate_context(mm)            do { } while (0)
+#define switch_mm(prev,next,tsk)        do { } while (0)
+#define deactivate_mm(mm,tsk)           do { } while (0)
+#define activate_mm(prev,next)          do { } while (0)
+#define enter_lazy_tlb(mm,tsk)          do { } while (0)
+#endif /* CONFIG_MMU */
+
+
+#endif /* not __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_MMU_CONTEXT_H */
diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h
new file mode 100644
index 0000000..ebf0228
--- /dev/null
+++ b/include/asm-m32r/mmzone.h
@@ -0,0 +1,80 @@
+/*
+ * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
+ *
+ */
+
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/smp.h>
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid)		(node_data[nid])
+
+#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
+#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
+#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid)						\
+({									\
+	pg_data_t *__pgdat = NODE_DATA(nid);				\
+	__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;	\
+})
+
+#define local_mapnr(kvaddr)						\
+({									\
+	unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT;		\
+	(__pfn - node_start_pfn(pfn_to_nid(__pfn)));			\
+})
+
+#define pfn_to_page(pfn)						\
+({									\
+	unsigned long __pfn = pfn;					\
+	int __node  = pfn_to_nid(__pfn);				\
+	&node_mem_map(__node)[node_localnr(__pfn,__node)];		\
+})
+
+#define page_to_pfn(pg)							\
+({									\
+	struct page *__page = pg;					\
+	struct zone *__zone = page_zone(__page);			\
+	(unsigned long)(__page - __zone->zone_mem_map)			\
+		+ __zone->zone_start_pfn;				\
+})
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+/*
+ * pfn_valid should be made as fast as possible, and the current definition
+ * is valid for machines that are NUMA, but still contiguous, which is what
+ * is currently supported. A more generalised, but slower definition would
+ * be something like this - mbligh:
+ * ( pfn_to_pgdat(pfn) && ((pfn) < node_end_pfn(pfn_to_nid(pfn))) )
+ */
+#if 1	/* M32R_FIXME */
+#define pfn_valid(pfn)	(1)
+#else
+#define pfn_valid(pfn)	((pfn) < num_physpages)
+#endif
+
+/*
+ * generic node memory support, the following assumptions apply:
+ */
+
+static __inline__ int pfn_to_nid(unsigned long pfn)
+{
+	int node;
+
+	for (node = 0 ; node < MAX_NUMNODES ; node++)
+		if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+			break;
+
+	return node;
+}
+
+static __inline__ struct pglist_data *pfn_to_pgdat(unsigned long pfn)
+{
+	return(NODE_DATA(pfn_to_nid(pfn)));
+}
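+
+/*
+ * Usage sketch (illustrative only): resolving a pfn to its struct page by
+ * hand follows the same steps pfn_to_page() takes:
+ *
+ *	int nid = pfn_to_nid(pfn);
+ *	struct page *page = &node_mem_map(nid)[node_localnr(pfn, nid)];
+ */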
+
+#endif /* CONFIG_DISCONTIGMEM */
+#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-m32r/module.h b/include/asm-m32r/module.h
new file mode 100644
index 0000000..3f2541c
--- /dev/null
+++ b/include/asm-m32r/module.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_M32R_MODULE_H
+#define _ASM_M32R_MODULE_H
+
+/* $Id$ */
+
+struct mod_arch_specific { };
+
+#define Elf_Shdr	Elf32_Shdr
+#define Elf_Sym		Elf32_Sym
+#define Elf_Ehdr	Elf32_Ehdr
+
+#endif /* _ASM_M32R_MODULE_H */
+
diff --git a/include/asm-m32r/msgbuf.h b/include/asm-m32r/msgbuf.h
new file mode 100644
index 0000000..852ff52
--- /dev/null
+++ b/include/asm-m32r/msgbuf.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_M32R_MSGBUF_H
+#define _ASM_M32R_MSGBUF_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * The msqid64_ds structure for m32r architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	unsigned long	__unused1;
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	unsigned long	__unused2;
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long	__unused3;
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+#endif /* _ASM_M32R_MSGBUF_H */
diff --git a/include/asm-m32r/namei.h b/include/asm-m32r/namei.h
new file mode 100644
index 0000000..7172d3d
--- /dev/null
+++ b/include/asm-m32r/namei.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_M32R_NAMEI_H
+#define _ASM_M32R_NAMEI_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * linux/include/asm-m32r/namei.h
+ *
+ * Included from linux/fs/namei.c
+ */
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* _ASM_M32R_NAMEI_H */
diff --git a/include/asm-m32r/numnodes.h b/include/asm-m32r/numnodes.h
new file mode 100644
index 0000000..479a39d4
--- /dev/null
+++ b/include/asm-m32r/numnodes.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_NUMNODES_H_
+#define _ASM_NUMNODES_H_
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DISCONTIGMEM
+
+#if defined(CONFIG_CHIP_M32700)
+#define	NODES_SHIFT	1	/* Max 2 Nodes */
+#endif	/* CONFIG_CHIP_M32700 */
+
+#endif	/* CONFIG_DISCONTIGMEM */
+
+#endif	/* _ASM_NUMNODES_H_ */
+
diff --git a/include/asm-m32r/opsput/opsput_lan.h b/include/asm-m32r/opsput/opsput_lan.h
new file mode 100644
index 0000000..7a2a839
--- /dev/null
+++ b/include/asm-m32r/opsput/opsput_lan.h
@@ -0,0 +1,56 @@
+/*
+ * include/asm/opsput_lan.h
+ *
+ * OPSPUT-LAN board
+ *
+ * Copyright (c) 2002-2004	Takeo Takahashi, Mamoru Sakugawa
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id: opsput_lan.h,v 1.1 2004/07/27 06:54:20 sakugawa Exp $
+ */
+
+#ifndef _OPSPUT_OPSPUT_LAN_H
+#define _OPSPUT_OPSPUT_LAN_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define OPSPUT_LAN_BASE	(0x10000000 /* + NONCACHE_OFFSET */)
+#else
+#define OPSPUT_LAN_BASE	(0x10000000 + NONCACHE_OFFSET)
+#endif	/* __ASSEMBLY__ */
+
+/* ICU
+ *  ICUISTS:	status register
+ *  ICUIREQ0: 	request register
+ *  ICUIREQ1: 	request register
+ *  ICUCR3:	control register for CFIREQ# interrupt
+ *  ICUCR4:	control register for CFC Card insert interrupt
+ *  ICUCR5:	control register for CFC Card eject interrupt
+ *  ICUCR6:	control register for external interrupt
+ *  ICUCR11:	control register for MMC Card insert/eject interrupt
+ *  ICUCR13:	control register for SC error interrupt
+ *  ICUCR14:	control register for SC receive interrupt
+ *  ICUCR15:	control register for SC send interrupt
+ *  ICUCR16:	control register for SIO0 receive interrupt
+ *  ICUCR17:	control register for SIO0 send interrupt
+ */
+#define OPSPUT_LAN_IRQ_LAN	(OPSPUT_LAN_PLD_IRQ_BASE + 1)	/* LAN */
+#define OPSPUT_LAN_IRQ_I2C	(OPSPUT_LAN_PLD_IRQ_BASE + 3)	/* I2C */
+
+#define OPSPUT_LAN_ICUISTS	__reg16(OPSPUT_LAN_BASE + 0xc0002)
+#define OPSPUT_LAN_ICUISTS_VECB_MASK	(0xf000)
+#define OPSPUT_LAN_VECB(x)	((x) & OPSPUT_LAN_ICUISTS_VECB_MASK)
+#define OPSPUT_LAN_ICUISTS_ISN_MASK	(0x07c0)
+#define OPSPUT_LAN_ICUISTS_ISN(x)	((x) & OPSPUT_LAN_ICUISTS_ISN_MASK)
+#define OPSPUT_LAN_ICUIREQ0	__reg16(OPSPUT_LAN_BASE + 0xc0004)
+#define OPSPUT_LAN_ICUCR1	__reg16(OPSPUT_LAN_BASE + 0xc0010)
+#define OPSPUT_LAN_ICUCR3	__reg16(OPSPUT_LAN_BASE + 0xc0014)
+
+#endif	/* _OPSPUT_OPSPUT_LAN_H */
diff --git a/include/asm-m32r/opsput/opsput_lcd.h b/include/asm-m32r/opsput/opsput_lcd.h
new file mode 100644
index 0000000..3a883e3
--- /dev/null
+++ b/include/asm-m32r/opsput/opsput_lcd.h
@@ -0,0 +1,59 @@
+/*
+ * include/asm/opsput_lcd.h
+ *
+ * OPSPUT-LCD board
+ *
+ * Copyright (c) 2002	Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id: opsput_lcd.h,v 1.1 2004/07/27 06:54:20 sakugawa Exp $
+ */
+
+#ifndef _OPSPUT_OPSPUT_LCD_H
+#define _OPSPUT_OPSPUT_LCD_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define OPSPUT_LCD_BASE	(0x10000000 /* + NONCACHE_OFFSET */)
+#else
+#define OPSPUT_LCD_BASE	(0x10000000 + NONCACHE_OFFSET)
+#endif	/* __ASSEMBLY__ */
+
+/*
+ * ICU
+ */
+#define OPSPUT_LCD_IRQ_BAT_INT	(OPSPUT_LCD_PLD_IRQ_BASE + 1)
+#define OPSPUT_LCD_IRQ_USB_INT1	(OPSPUT_LCD_PLD_IRQ_BASE + 2)
+#define OPSPUT_LCD_IRQ_AUDT0		(OPSPUT_LCD_PLD_IRQ_BASE + 3)
+#define OPSPUT_LCD_IRQ_AUDT2		(OPSPUT_LCD_PLD_IRQ_BASE + 4)
+#define OPSPUT_LCD_IRQ_BATSIO_RCV	(OPSPUT_LCD_PLD_IRQ_BASE + 16)
+#define OPSPUT_LCD_IRQ_BATSIO_SND	(OPSPUT_LCD_PLD_IRQ_BASE + 17)
+#define OPSPUT_LCD_IRQ_ASNDSIO_RCV	(OPSPUT_LCD_PLD_IRQ_BASE + 18)
+#define OPSPUT_LCD_IRQ_ASNDSIO_SND	(OPSPUT_LCD_PLD_IRQ_BASE + 19)
+#define OPSPUT_LCD_IRQ_ACNLSIO_SND	(OPSPUT_LCD_PLD_IRQ_BASE + 21)
+
+#define OPSPUT_LCD_ICUISTS	__reg16(OPSPUT_LCD_BASE + 0x300002)
+#define OPSPUT_LCD_ICUISTS_VECB_MASK	(0xf000)
+#define OPSPUT_LCD_VECB(x)	((x) & OPSPUT_LCD_ICUISTS_VECB_MASK)
+#define OPSPUT_LCD_ICUISTS_ISN_MASK	(0x07c0)
+#define OPSPUT_LCD_ICUISTS_ISN(x)	((x) & OPSPUT_LCD_ICUISTS_ISN_MASK)
+#define OPSPUT_LCD_ICUIREQ0	__reg16(OPSPUT_LCD_BASE + 0x300004)
+#define OPSPUT_LCD_ICUIREQ1	__reg16(OPSPUT_LCD_BASE + 0x300006)
+#define OPSPUT_LCD_ICUCR1	__reg16(OPSPUT_LCD_BASE + 0x300020)
+#define OPSPUT_LCD_ICUCR2	__reg16(OPSPUT_LCD_BASE + 0x300022)
+#define OPSPUT_LCD_ICUCR3	__reg16(OPSPUT_LCD_BASE + 0x300024)
+#define OPSPUT_LCD_ICUCR4	__reg16(OPSPUT_LCD_BASE + 0x300026)
+#define OPSPUT_LCD_ICUCR16	__reg16(OPSPUT_LCD_BASE + 0x300030)
+#define OPSPUT_LCD_ICUCR17	__reg16(OPSPUT_LCD_BASE + 0x300032)
+#define OPSPUT_LCD_ICUCR18	__reg16(OPSPUT_LCD_BASE + 0x300034)
+#define OPSPUT_LCD_ICUCR19	__reg16(OPSPUT_LCD_BASE + 0x300036)
+#define OPSPUT_LCD_ICUCR21	__reg16(OPSPUT_LCD_BASE + 0x30003a)
+
+#endif	/* _OPSPUT_OPSPUT_LCD_H */
diff --git a/include/asm-m32r/opsput/opsput_pld.h b/include/asm-m32r/opsput/opsput_pld.h
new file mode 100644
index 0000000..2018e69
--- /dev/null
+++ b/include/asm-m32r/opsput/opsput_pld.h
@@ -0,0 +1,259 @@
+/*
+ * include/asm/opsput/opsput_pld.h
+ *
+ * Definitions for the Programmable Logic Device (PLD) on the OPSPUT board.
+ *
+ * Copyright (c) 2002	Takeo Takahashi
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * $Id: opsput_pld.h,v 1.1 2004/07/27 06:54:20 sakugawa Exp $
+ */
+
+#ifndef _OPSPUT_OPSPUT_PLD_H
+#define _OPSPUT_OPSPUT_PLD_H
+
+#include <linux/config.h>
+
+#define PLD_PLAT_BASE		0x1cc00000
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cache address.
+ */
+#define PLD_BASE		(PLD_PLAT_BASE /* + NONCACHE_OFFSET */)
+#define __reg8			(volatile unsigned char *)
+#define __reg16			(volatile unsigned short *)
+#define __reg32			(volatile unsigned int *)
+#else
+#define PLD_BASE		(PLD_PLAT_BASE + NONCACHE_OFFSET)
+#define __reg8
+#define __reg16
+#define __reg32
+#endif	/* __ASSEMBLY__ */
+
+/* CFC */
+#define	PLD_CFRSTCR		__reg16(PLD_BASE + 0x0000)
+#define PLD_CFSTS		__reg16(PLD_BASE + 0x0002)
+#define PLD_CFIMASK		__reg16(PLD_BASE + 0x0004)
+#define PLD_CFBUFCR		__reg16(PLD_BASE + 0x0006)
+#define PLD_CFVENCR		__reg16(PLD_BASE + 0x0008)
+#define PLD_CFCR0		__reg16(PLD_BASE + 0x000a)
+#define PLD_CFCR1		__reg16(PLD_BASE + 0x000c)
+#define PLD_IDERSTCR		__reg16(PLD_BASE + 0x0010)
+
+/* MMC */
+#define PLD_MMCCR		__reg16(PLD_BASE + 0x4000)
+#define PLD_MMCMOD		__reg16(PLD_BASE + 0x4002)
+#define PLD_MMCSTS		__reg16(PLD_BASE + 0x4006)
+#define PLD_MMCBAUR		__reg16(PLD_BASE + 0x400a)
+#define PLD_MMCCMDBCUT		__reg16(PLD_BASE + 0x400c)
+#define PLD_MMCCDTBCUT		__reg16(PLD_BASE + 0x400e)
+#define PLD_MMCDET		__reg16(PLD_BASE + 0x4010)
+#define PLD_MMCWP		__reg16(PLD_BASE + 0x4012)
+#define PLD_MMCWDATA		__reg16(PLD_BASE + 0x5000)
+#define PLD_MMCRDATA		__reg16(PLD_BASE + 0x6000)
+#define PLD_MMCCMDDATA		__reg16(PLD_BASE + 0x7000)
+#define PLD_MMCRSPDATA		__reg16(PLD_BASE + 0x7006)
+
+/* ICU
+ *  ICUISTS:	status register
+ *  ICUIREQ0: 	request register
+ *  ICUIREQ1: 	request register
+ *  ICUCR3:	control register for CFIREQ# interrupt
+ *  ICUCR4:	control register for CFC Card insert interrupt
+ *  ICUCR5:	control register for CFC Card eject interrupt
+ *  ICUCR6:	control register for external interrupt
+ *  ICUCR11:	control register for MMC Card insert/eject interrupt
+ *  ICUCR13:	control register for SC error interrupt
+ *  ICUCR14:	control register for SC receive interrupt
+ *  ICUCR15:	control register for SC send interrupt
+ *  ICUCR16:	control register for SIO0 receive interrupt
+ *  ICUCR17:	control register for SIO0 send interrupt
+ */
+#if !defined(CONFIG_PLAT_USRV)
+#define PLD_IRQ_INT0		(OPSPUT_PLD_IRQ_BASE + 0)	/* None */
+#define PLD_IRQ_INT1		(OPSPUT_PLD_IRQ_BASE + 1)	/* reserved */
+#define PLD_IRQ_INT2		(OPSPUT_PLD_IRQ_BASE + 2)	/* reserved */
+#define PLD_IRQ_CFIREQ		(OPSPUT_PLD_IRQ_BASE + 3)	/* CF IREQ */
+#define PLD_IRQ_CFC_INSERT	(OPSPUT_PLD_IRQ_BASE + 4)	/* CF Insert */
+#define PLD_IRQ_CFC_EJECT	(OPSPUT_PLD_IRQ_BASE + 5)	/* CF Eject */
+#define PLD_IRQ_EXINT		(OPSPUT_PLD_IRQ_BASE + 6)	/* EXINT */
+#define PLD_IRQ_INT7		(OPSPUT_PLD_IRQ_BASE + 7)	/* reserved */
+#define PLD_IRQ_INT8		(OPSPUT_PLD_IRQ_BASE + 8)	/* reserved */
+#define PLD_IRQ_INT9		(OPSPUT_PLD_IRQ_BASE + 9)	/* reserved */
+#define PLD_IRQ_INT10		(OPSPUT_PLD_IRQ_BASE + 10)	/* reserved */
+#define PLD_IRQ_MMCCARD		(OPSPUT_PLD_IRQ_BASE + 11)	/* MMC Insert/Eject */
+#define PLD_IRQ_INT12		(OPSPUT_PLD_IRQ_BASE + 12)	/* reserved */
+#define PLD_IRQ_SC_ERROR	(OPSPUT_PLD_IRQ_BASE + 13)	/* SC error */
+#define PLD_IRQ_SC_RCV		(OPSPUT_PLD_IRQ_BASE + 14)	/* SC receive */
+#define PLD_IRQ_SC_SND		(OPSPUT_PLD_IRQ_BASE + 15)	/* SC send */
+#define PLD_IRQ_SIO0_RCV	(OPSPUT_PLD_IRQ_BASE + 16)	/* SIO receive */
+#define PLD_IRQ_SIO0_SND	(OPSPUT_PLD_IRQ_BASE + 17)	/* SIO send */
+#define PLD_IRQ_INT18		(OPSPUT_PLD_IRQ_BASE + 18)	/* reserved */
+#define PLD_IRQ_INT19		(OPSPUT_PLD_IRQ_BASE + 19)	/* reserved */
+#define PLD_IRQ_INT20		(OPSPUT_PLD_IRQ_BASE + 20)	/* reserved */
+#define PLD_IRQ_INT21		(OPSPUT_PLD_IRQ_BASE + 21)	/* reserved */
+#define PLD_IRQ_INT22		(OPSPUT_PLD_IRQ_BASE + 22)	/* reserved */
+#define PLD_IRQ_INT23		(OPSPUT_PLD_IRQ_BASE + 23)	/* reserved */
+#define PLD_IRQ_INT24		(OPSPUT_PLD_IRQ_BASE + 24)	/* reserved */
+#define PLD_IRQ_INT25		(OPSPUT_PLD_IRQ_BASE + 25)	/* reserved */
+#define PLD_IRQ_INT26		(OPSPUT_PLD_IRQ_BASE + 26)	/* reserved */
+#define PLD_IRQ_INT27		(OPSPUT_PLD_IRQ_BASE + 27)	/* reserved */
+#define PLD_IRQ_INT28		(OPSPUT_PLD_IRQ_BASE + 28)	/* reserved */
+#define PLD_IRQ_INT29		(OPSPUT_PLD_IRQ_BASE + 29)	/* reserved */
+#define PLD_IRQ_INT30		(OPSPUT_PLD_IRQ_BASE + 30)	/* reserved */
+#define PLD_IRQ_INT31		(OPSPUT_PLD_IRQ_BASE + 31)	/* reserved */
+
+#else	/* CONFIG_PLAT_USRV */
+
+#define PLD_IRQ_INT0		(OPSPUT_PLD_IRQ_BASE + 0)	/* None */
+#define PLD_IRQ_INT1		(OPSPUT_PLD_IRQ_BASE + 1)	/* reserved */
+#define PLD_IRQ_INT2		(OPSPUT_PLD_IRQ_BASE + 2)	/* reserved */
+#define PLD_IRQ_CF0		(OPSPUT_PLD_IRQ_BASE + 3)	/* CF0# */
+#define PLD_IRQ_CF1		(OPSPUT_PLD_IRQ_BASE + 4)	/* CF1# */
+#define PLD_IRQ_CF2		(OPSPUT_PLD_IRQ_BASE + 5)	/* CF2# */
+#define PLD_IRQ_CF3		(OPSPUT_PLD_IRQ_BASE + 6)	/* CF3# */
+#define PLD_IRQ_CF4		(OPSPUT_PLD_IRQ_BASE + 7)	/* CF4# */
+#define PLD_IRQ_INT8		(OPSPUT_PLD_IRQ_BASE + 8)	/* reserved */
+#define PLD_IRQ_INT9		(OPSPUT_PLD_IRQ_BASE + 9)	/* reserved */
+#define PLD_IRQ_INT10		(OPSPUT_PLD_IRQ_BASE + 10)	/* reserved */
+#define PLD_IRQ_INT11		(OPSPUT_PLD_IRQ_BASE + 11)	/* reserved */
+#define PLD_IRQ_UART0		(OPSPUT_PLD_IRQ_BASE + 12)	/* UARTIRQ0 */
+#define PLD_IRQ_UART1		(OPSPUT_PLD_IRQ_BASE + 13)	/* UARTIRQ1 */
+#define PLD_IRQ_INT14		(OPSPUT_PLD_IRQ_BASE + 14)	/* reserved */
+#define PLD_IRQ_INT15		(OPSPUT_PLD_IRQ_BASE + 15)	/* reserved */
+#define PLD_IRQ_SNDINT		(OPSPUT_PLD_IRQ_BASE + 16)	/* SNDINT# */
+#define PLD_IRQ_INT17		(OPSPUT_PLD_IRQ_BASE + 17)	/* reserved */
+#define PLD_IRQ_INT18		(OPSPUT_PLD_IRQ_BASE + 18)	/* reserved */
+#define PLD_IRQ_INT19		(OPSPUT_PLD_IRQ_BASE + 19)	/* reserved */
+#define PLD_IRQ_INT20		(OPSPUT_PLD_IRQ_BASE + 20)	/* reserved */
+#define PLD_IRQ_INT21		(OPSPUT_PLD_IRQ_BASE + 21)	/* reserved */
+#define PLD_IRQ_INT22		(OPSPUT_PLD_IRQ_BASE + 22)	/* reserved */
+#define PLD_IRQ_INT23		(OPSPUT_PLD_IRQ_BASE + 23)	/* reserved */
+#define PLD_IRQ_INT24		(OPSPUT_PLD_IRQ_BASE + 24)	/* reserved */
+#define PLD_IRQ_INT25		(OPSPUT_PLD_IRQ_BASE + 25)	/* reserved */
+#define PLD_IRQ_INT26		(OPSPUT_PLD_IRQ_BASE + 26)	/* reserved */
+#define PLD_IRQ_INT27		(OPSPUT_PLD_IRQ_BASE + 27)	/* reserved */
+#define PLD_IRQ_INT28		(OPSPUT_PLD_IRQ_BASE + 28)	/* reserved */
+#define PLD_IRQ_INT29		(OPSPUT_PLD_IRQ_BASE + 29)	/* reserved */
+#define PLD_IRQ_INT30		(OPSPUT_PLD_IRQ_BASE + 30)	/* reserved */
+
+#endif	/* CONFIG_PLAT_USRV */
+
+#define PLD_ICUISTS		__reg16(PLD_BASE + 0x8002)
+#define PLD_ICUISTS_VECB_MASK	(0xf000)
+#define PLD_ICUISTS_VECB(x)	((x) & PLD_ICUISTS_VECB_MASK)
+#define PLD_ICUISTS_ISN_MASK	(0x07c0)
+#define PLD_ICUISTS_ISN(x)	((x) & PLD_ICUISTS_ISN_MASK)
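+/*
+ * Example (illustrative, not part of the original header): if "status" was
+ * read from PLD_ICUISTS, PLD_ICUISTS_ISN(status) keeps bits 6-10 of the
+ * status word and PLD_ICUISTS_VECB(status) keeps bits 12-15; callers still
+ * have to shift the result down if they want a plain number.
+ */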
+#define PLD_ICUIREQ0		__reg16(PLD_BASE + 0x8004)
+#define PLD_ICUIREQ1		__reg16(PLD_BASE + 0x8006)
+#define PLD_ICUCR1		__reg16(PLD_BASE + 0x8100)
+#define PLD_ICUCR2		__reg16(PLD_BASE + 0x8102)
+#define PLD_ICUCR3		__reg16(PLD_BASE + 0x8104)
+#define PLD_ICUCR4		__reg16(PLD_BASE + 0x8106)
+#define PLD_ICUCR5		__reg16(PLD_BASE + 0x8108)
+#define PLD_ICUCR6		__reg16(PLD_BASE + 0x810a)
+#define PLD_ICUCR7		__reg16(PLD_BASE + 0x810c)
+#define PLD_ICUCR8		__reg16(PLD_BASE + 0x810e)
+#define PLD_ICUCR9		__reg16(PLD_BASE + 0x8110)
+#define PLD_ICUCR10		__reg16(PLD_BASE + 0x8112)
+#define PLD_ICUCR11		__reg16(PLD_BASE + 0x8114)
+#define PLD_ICUCR12		__reg16(PLD_BASE + 0x8116)
+#define PLD_ICUCR13		__reg16(PLD_BASE + 0x8118)
+#define PLD_ICUCR14		__reg16(PLD_BASE + 0x811a)
+#define PLD_ICUCR15		__reg16(PLD_BASE + 0x811c)
+#define PLD_ICUCR16		__reg16(PLD_BASE + 0x811e)
+#define PLD_ICUCR17		__reg16(PLD_BASE + 0x8120)
+#define PLD_ICUCR_IEN		(0x1000)
+#define PLD_ICUCR_IREQ		(0x0100)
+#define PLD_ICUCR_ISMOD00	(0x0000)	/* Low edge */
+#define PLD_ICUCR_ISMOD01	(0x0010)	/* Low level */
+#define PLD_ICUCR_ISMOD02	(0x0020)	/* High edge */
+#define PLD_ICUCR_ISMOD03	(0x0030)	/* High level */
+#define PLD_ICUCR_ILEVEL0	(0x0000)
+#define PLD_ICUCR_ILEVEL1	(0x0001)
+#define PLD_ICUCR_ILEVEL2	(0x0002)
+#define PLD_ICUCR_ILEVEL3	(0x0003)
+#define PLD_ICUCR_ILEVEL4	(0x0004)
+#define PLD_ICUCR_ILEVEL5	(0x0005)
+#define PLD_ICUCR_ILEVEL6	(0x0006)
+#define PLD_ICUCR_ILEVEL7	(0x0007)
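+/*
+ * Example (illustrative only): a control-register value that enables an
+ * interrupt as high-level sensitive at level 6 would be composed as
+ *
+ *	PLD_ICUCR_IEN | PLD_ICUCR_ISMOD03 | PLD_ICUCR_ILEVEL6	== 0x1036
+ */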
+
+/* Power Control of MMC and CF */
+#define PLD_CPCR		__reg16(PLD_BASE + 0x14000)
+#define PLD_CPCR_CF		0x0001
+#define PLD_CPCR_MMC		0x0002
+
+/* LED Control
+ *
+ * 1: DIP switch side
+ * 2: Reset switch side
+ */
+#define PLD_IOLEDCR		__reg16(PLD_BASE + 0x14002)
+#define PLD_IOLED_1_ON		0x001
+#define PLD_IOLED_1_OFF		0x000
+#define PLD_IOLED_2_ON		0x002
+#define PLD_IOLED_2_OFF		0x000
+
+/* DIP Switch
+ *  0: Write-protect of Flash Memory (0:protected, 1:non-protected)
+ *  1: -
+ *  2: -
+ *  3: -
+ */
+#define PLD_IOSWSTS		__reg16(PLD_BASE + 0x14004)
+#define	PLD_IOSWSTS_IOSW2	0x0200
+#define	PLD_IOSWSTS_IOSW1	0x0100
+#define	PLD_IOSWSTS_IOWP0	0x0001
+
+/* CRC */
+#define PLD_CRC7DATA		__reg16(PLD_BASE + 0x18000)
+#define PLD_CRC7INDATA		__reg16(PLD_BASE + 0x18002)
+#define PLD_CRC16DATA		__reg16(PLD_BASE + 0x18004)
+#define PLD_CRC16INDATA		__reg16(PLD_BASE + 0x18006)
+#define PLD_CRC16ADATA		__reg16(PLD_BASE + 0x18008)
+#define PLD_CRC16AINDATA	__reg16(PLD_BASE + 0x1800a)
+
+/* RTC */
+#define PLD_RTCCR		__reg16(PLD_BASE + 0x1c000)
+#define PLD_RTCBAUR		__reg16(PLD_BASE + 0x1c002)
+#define PLD_RTCWRDATA		__reg16(PLD_BASE + 0x1c004)
+#define PLD_RTCRDDATA		__reg16(PLD_BASE + 0x1c006)
+#define PLD_RTCRSTODT		__reg16(PLD_BASE + 0x1c008)
+
+/* SIO0 */
+#define PLD_ESIO0CR		__reg16(PLD_BASE + 0x20000)
+#define	PLD_ESIO0CR_TXEN	0x0001
+#define	PLD_ESIO0CR_RXEN	0x0002
+#define PLD_ESIO0MOD0		__reg16(PLD_BASE + 0x20002)
+#define	PLD_ESIO0MOD0_CTSS	0x0040
+#define	PLD_ESIO0MOD0_RTSS	0x0080
+#define PLD_ESIO0MOD1		__reg16(PLD_BASE + 0x20004)
+#define	PLD_ESIO0MOD1_LMFS	0x0010
+#define PLD_ESIO0STS		__reg16(PLD_BASE + 0x20006)
+#define	PLD_ESIO0STS_TEMP	0x0001
+#define	PLD_ESIO0STS_TXCP	0x0002
+#define	PLD_ESIO0STS_RXCP	0x0004
+#define	PLD_ESIO0STS_TXSC	0x0100
+#define	PLD_ESIO0STS_RXSC	0x0200
+#define PLD_ESIO0STS_TXREADY	(PLD_ESIO0STS_TXCP | PLD_ESIO0STS_TEMP)
+#define PLD_ESIO0INTCR		__reg16(PLD_BASE + 0x20008)
+#define	PLD_ESIO0INTCR_TXIEN	0x0002
+#define	PLD_ESIO0INTCR_RXCEN	0x0004
+#define PLD_ESIO0BAUR		__reg16(PLD_BASE + 0x2000a)
+#define PLD_ESIO0TXB		__reg16(PLD_BASE + 0x2000c)
+#define PLD_ESIO0RXB		__reg16(PLD_BASE + 0x2000e)
+
+/* SIM Card */
+#define PLD_SCCR		__reg16(PLD_BASE + 0x38000)
+#define PLD_SCMOD		__reg16(PLD_BASE + 0x38004)
+#define PLD_SCSTS		__reg16(PLD_BASE + 0x38006)
+#define PLD_SCINTCR		__reg16(PLD_BASE + 0x38008)
+#define PLD_SCBAUR		__reg16(PLD_BASE + 0x3800a)
+#define PLD_SCTXB		__reg16(PLD_BASE + 0x3800c)
+#define PLD_SCRXB		__reg16(PLD_BASE + 0x3800e)
+
+#endif	/* _OPSPUT_OPSPUT_PLD_H */
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
new file mode 100644
index 0000000..1c6abb9
--- /dev/null
+++ b/include/asm-m32r/page.h
@@ -0,0 +1,115 @@
+#ifndef _ASM_M32R_PAGE_H
+#define _ASM_M32R_PAGE_H
+
+#include <linux/config.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(x)	((x).pte)
+#define PTE_MASK	PAGE_MASK
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
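+/*
+ * Example (illustrative): with the 4KB page size above,
+ * PAGE_ALIGN(0x1001) == 0x2000, while PAGE_ALIGN(0x2000) stays 0x2000,
+ * i.e. addresses already on a page boundary are left unchanged.
+ */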
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * On the M32R with CONFIG_MMU, __PAGE_OFFSET is 0x80000000 (see below),
+ * so the kernel direct-maps physical memory starting at virtual address
+ * 0x80000000 and user space keeps the lower 2GB of the virtual address
+ * space.
+ */
+
+
+/* This handles the memory map.. */
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size - 1) >> (PAGE_SHIFT - 1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+
+	return order;
+}
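+
+/*
+ * Example (illustrative): get_order(PAGE_SIZE) == 0,
+ * get_order(PAGE_SIZE + 1) == 1 and get_order(8 * PAGE_SIZE) == 3,
+ * i.e. the result is the number of pages needed, rounded up to a
+ * power of two and expressed as its log2.
+ */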
+
+#endif /* __ASSEMBLY__ */
+
+#define __MEMORY_START  CONFIG_MEMORY_START
+#define __MEMORY_SIZE   CONFIG_MEMORY_SIZE
+
+#ifdef CONFIG_MMU
+#define __PAGE_OFFSET  (0x80000000)
+#else
+#define __PAGE_OFFSET  (0x00000000)
+#endif
+
+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+#define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long)(x) + PAGE_OFFSET))
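+
+/*
+ * Example (illustrative): with CONFIG_MMU (__PAGE_OFFSET == 0x80000000),
+ * __pa(0x80100000) == 0x00100000 and __va(0x00100000) == (void *)0x80100000,
+ * so __va(__pa(x)) == x for any direct-mapped kernel address x.
+ */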
+
+#ifndef CONFIG_DISCONTIGMEM
+#define PFN_BASE		(CONFIG_MEMORY_START >> PAGE_SHIFT)
+#define pfn_to_page(pfn)	(mem_map + ((pfn) - PFN_BASE))
+#define page_to_pfn(page)	\
+	((unsigned long)((page) - mem_map) + PFN_BASE)
+#define pfn_valid(pfn)		(((pfn) - PFN_BASE) < max_mapnr)
+#endif  /* !CONFIG_DISCONTIGMEM */
+
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC )
+
+#define devmem_is_allowed(x) 1
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_PAGE_H */
+
diff --git a/include/asm-m32r/param.h b/include/asm-m32r/param.h
new file mode 100644
index 0000000..750b938
--- /dev/null
+++ b/include/asm-m32r/param.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_M32R_PARAM_H
+#define _ASM_M32R_PARAM_H
+
+/* $Id$ */
+
+/* orig : i386 2.5.67 */
+
+#ifdef __KERNEL__
+# define HZ		100		/* Internal kernel timer frequency */
+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE	4096
+
+#ifndef NOGROUP
+#define NOGROUP		(-1)
+#endif
+
+#define MAXHOSTNAMELEN	64	/* max length of hostname */
+
+#endif /* _ASM_M32R_PARAM_H */
+
diff --git a/include/asm-m32r/pci.h b/include/asm-m32r/pci.h
new file mode 100644
index 0000000..00d7b6f
--- /dev/null
+++ b/include/asm-m32r/pci.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_M32R_PCI_H
+#define _ASM_M32R_PCI_H
+
+/* $Id$ */
+
+#include <asm-generic/pci.h>
+
+#define PCI_DMA_BUS_IS_PHYS	(1)
+
+#endif /* _ASM_M32R_PCI_H */
diff --git a/include/asm-m32r/percpu.h b/include/asm-m32r/percpu.h
new file mode 100644
index 0000000..e316930
--- /dev/null
+++ b/include/asm-m32r/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ARCH_M32R_PERCPU__
+#define __ARCH_M32R_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ARCH_M32R_PERCPU__ */
diff --git a/include/asm-m32r/pgalloc.h b/include/asm-m32r/pgalloc.h
new file mode 100644
index 0000000..6da309b
--- /dev/null
+++ b/include/asm-m32r/pgalloc.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_M32R_PGALLOC_H
+#define _ASM_M32R_PGALLOC_H
+
+/* $Id$ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+
+#define pmd_populate_kernel(mm, pmd, pte)	\
+	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+
+static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+	struct page *pte)
+{
+	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+}
+
+/*
+ * Allocate and free page tables.
+ */
+static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+
+	return pgd;
+}
+
+static __inline__ void pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+	unsigned long address)
+{
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+
+	return pte;
+}
+
+static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
+	unsigned long address)
+{
+	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+
+	return pte;
+}
+
+static __inline__ void pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static __inline__ void pte_free(struct page *pte)
+{
+	__free_page(pte);
+}
+
+#define __pte_free_tlb(tlb, pte)	pte_free((pte))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+
+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+#define __pmd_free_tlb(tlb, x)		do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
+
+#define check_pgt_cache()	do { } while (0)
+
+#endif /* _ASM_M32R_PGALLOC_H */
+
diff --git a/include/asm-m32r/pgtable-2level.h b/include/asm-m32r/pgtable-2level.h
new file mode 100644
index 0000000..861727c
--- /dev/null
+++ b/include/asm-m32r/pgtable-2level.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_M32R_PGTABLE_2LEVEL_H
+#define _ASM_M32R_PGTABLE_2LEVEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+/*
+ * traditional M32R two-level paging structure:
+ */
+
+#define PGDIR_SHIFT	22
+#define PTRS_PER_PGD	1024
+
+/*
+ * the M32R is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT	22
+#define PTRS_PER_PMD	1
+
+#define PTRS_PER_PTE	1024
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)	{ return 0; }
+static inline int pgd_bad(pgd_t pgd)	{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
+#define pgd_clear(xp)				do { } while (0)
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pte_atomic(pteptr, pteval)	set_pte(pteptr, pteval)
+/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) dir;
+}
+
+#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define pte_same(a, b)		(pte_val(a) == pte_val(b))
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define pte_none(x)		(!pte_val(x))
+#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define PTE_FILE_MAX_BITS	29
+#define pte_to_pgoff(pte)	(((pte_val(pte) >> 2) & 0xef) | (((pte_val(pte) >> 10)) << 7))
+#define pgoff_to_pte(off)	((pte_t) { (((off) & 0xef) << 2) | (((off) >> 7) << 10) | _PAGE_FILE })
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_PGTABLE_2LEVEL_H */
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
new file mode 100644
index 0000000..70a0eb6
--- /dev/null
+++ b/include/asm-m32r/pgtable.h
@@ -0,0 +1,400 @@
+#ifndef _ASM_M32R_PGTABLE_H
+#define _ASM_M32R_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#ifdef __KERNEL__
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the M32R, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * M32R mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the M32R page table tree.
+ */
+
+/* CAUTION!: If you change macro definitions in this file, you might have to
+ * change arch/m32r/mmu.S manually.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/addrspace.h>
+#include <asm/bitops.h>
+#include <asm/page.h>
+
+extern pgd_t swapper_pg_dir[1024];
+extern void paging_init(void);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+#include <asm/pgtable-2level.h>
+#endif
+
+#define pgtable_cache_init()	do { } while (0)
+
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE - 1))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_PGD_NR	0
+
+#ifndef __ASSEMBLY__
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_START		KSEG2
+#define VMALLOC_END		KSEG3
+
+/*
+ *     M32R TLB format
+ *
+ *     [0]    [1:19]           [20:23]       [24:31]
+ *     +-----------------------+----+-------------+
+ *     |          VPN          |0000|    ASID     |
+ *     +-----------------------+----+-------------+
+ *     +-+---------------------+----+-+---+-+-+-+-+
+ *     |0         PPN          |0000|N|AC |L|G|V| |
+ *     +-+---------------------+----+-+---+-+-+-+-+
+ *                                     RWX
+ */
+
+#define _PAGE_BIT_DIRTY		0	/* software: page changed */
+#define _PAGE_BIT_FILE		0	/* when !present: nonlinear file
+					   mapping */
+#define _PAGE_BIT_PRESENT	1	/* Valid: page is valid */
+#define _PAGE_BIT_GLOBAL	2	/* Global */
+#define _PAGE_BIT_LARGE		3	/* Large */
+#define _PAGE_BIT_EXEC		4	/* Execute */
+#define _PAGE_BIT_WRITE		5	/* Write */
+#define _PAGE_BIT_READ		6	/* Read */
+#define _PAGE_BIT_NONCACHABLE	7	/* Non cachable */
+#define _PAGE_BIT_ACCESSED	8	/* software: page referenced */
+#define _PAGE_BIT_PROTNONE	9	/* software: if not present */
+
+#define _PAGE_DIRTY		(1UL << _PAGE_BIT_DIRTY)
+#define _PAGE_FILE		(1UL << _PAGE_BIT_FILE)
+#define _PAGE_PRESENT		(1UL << _PAGE_BIT_PRESENT)
+#define _PAGE_GLOBAL		(1UL << _PAGE_BIT_GLOBAL)
+#define _PAGE_LARGE		(1UL << _PAGE_BIT_LARGE)
+#define _PAGE_EXEC		(1UL << _PAGE_BIT_EXEC)
+#define _PAGE_WRITE		(1UL << _PAGE_BIT_WRITE)
+#define _PAGE_READ		(1UL << _PAGE_BIT_READ)
+#define _PAGE_NONCACHABLE	(1UL << _PAGE_BIT_NONCACHABLE)
+#define _PAGE_ACCESSED		(1UL << _PAGE_BIT_ACCESSED)
+#define _PAGE_PROTNONE		(1UL << _PAGE_BIT_PROTNONE)
+
+#define _PAGE_TABLE	\
+	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
+	| _PAGE_DIRTY )
+#define _KERNPG_TABLE	\
+	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
+	| _PAGE_DIRTY )
+#define _PAGE_CHG_MASK	\
+	( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
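+
+/*
+ * Worked example (illustrative): with the bit numbers above,
+ * _PAGE_TABLE and _KERNPG_TABLE both expand to 0x163
+ * (DIRTY|PRESENT|WRITE|READ|ACCESSED), and _PAGE_CHG_MASK is
+ * 0xfffff101 (the pfn bits plus ACCESSED and DIRTY).
+ */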
+
+#ifdef CONFIG_MMU
+#define PAGE_NONE	\
+	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED	\
+	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_SHARED_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
+		| _PAGE_ACCESSED)
+#define PAGE_COPY	\
+	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_COPY_EXEC	\
+	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_READONLY	\
+	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_READONLY_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL	\
+	( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
+	| _PAGE_ACCESSED )
+#define __PAGE_KERNEL_RO	( __PAGE_KERNEL & ~_PAGE_WRITE )
+#define __PAGE_KERNEL_NOCACHE	( __PAGE_KERNEL | _PAGE_NONCACHABLE)
+
+#define MAKE_GLOBAL(x)	__pgprot((x) | _PAGE_GLOBAL)
+
+#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+
+#else
+#define PAGE_NONE		__pgprot(0)
+#define PAGE_SHARED		__pgprot(0)
+#define PAGE_SHARED_EXEC	__pgprot(0)
+#define PAGE_COPY		__pgprot(0)
+#define PAGE_COPY_EXEC		__pgprot(0)
+#define PAGE_READONLY		__pgprot(0)
+#define PAGE_READONLY_EXEC	__pgprot(0)
+
+#define PAGE_KERNEL		__pgprot(0)
+#define PAGE_KERNEL_RO		__pgprot(0)
+#define PAGE_KERNEL_NOCACHE	__pgprot(0)
+#endif /* CONFIG_MMU */
+
+	/* xwr */
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_EXEC
+#define __P101	PAGE_READONLY_EXEC
+#define __P110	PAGE_COPY_EXEC
+#define __P111	PAGE_COPY_EXEC
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_EXEC
+#define __S101	PAGE_READONLY_EXEC
+#define __S110	PAGE_SHARED_EXEC
+#define __S111	PAGE_SHARED_EXEC
+
+/* page table for 0-4MB for everybody */
+
+#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pmd_none(x)	(!pmd_val(x))
+#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
+#define	pmd_bad(x)	((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)
+
+#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_READ;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_EXEC;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_WRITE;
+}
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_READ;
+	return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_ACCESSED;
+	return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_READ;
+	return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	return pte;
+}
+
+static inline  int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+}
+
+static inline  int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_WRITE, ptep);
+}
+
+/*
+ * Macro and implementation to mark a page protection as uncacheable.
+ */
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+	unsigned long prot = pgprot_val(_prot);
+
+	prot |= _PAGE_NONCACHABLE;
+	return __pgprot(prot);
+}
+
+#define pgprot_writecombine(prot) pgprot_noncached(prot)
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) \
+		| pgprot_val(newprot)));
+
+	return pte;
+}
+
+#define page_pte(page)	page_pte_prot(page, __pgprot(0))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
+{
+	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
+}
+
+#define pmd_page_kernel(pmd)	\
+	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address)	\
+	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
+
+#define pmd_index(address)	\
+	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+#define pte_index(address)	\
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address)	\
+	((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
+#define pte_offset_map(dir, address)	\
+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)	do { } while (0)
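+
+/*
+ * Example (illustrative): a software walk of the two-level table for a
+ * kernel virtual address addr looks like
+ *
+ *	pgd_t *pgd = pgd_offset_k(addr);
+ *	pmd_t *pmd = pmd_offset(pgd, addr);	(trivial: folded into the pgd)
+ *	pte_t *pte = pte_offset_kernel(pmd, addr);
+ *
+ * With PGDIR_SHIFT == 22 and PAGE_SHIFT == 12, addr == 0x80401000 gives
+ * pgd_index(addr) == 0x201 and pte_index(addr) == 0x001.
+ */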
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x)			(((x).val >> 2) & 0x3f)
+#define __swp_offset(x)			((x).val >> 10)
+#define __swp_entry(type, offset)	\
+	((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
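+
+/*
+ * Worked example (illustrative): __swp_entry(5, 0x1234) packs the type into
+ * bits 2-7 and the offset into bits 10 and up, giving .val == 0x0048d014;
+ * __swp_type() and __swp_offset() then recover 5 and 0x1234.  Bits 1 and 9
+ * stay clear, so a swap pte is never seen as present by pte_present().
+ */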
+
+#endif /* !__ASSEMBLY__ */
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot)	\
+	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_PGTABLE_H */
diff --git a/include/asm-m32r/poll.h b/include/asm-m32r/poll.h
new file mode 100644
index 0000000..43b7acf
--- /dev/null
+++ b/include/asm-m32r/poll.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_M32R_POLL_H
+#define _ASM_M32R_POLL_H
+
+/*
+ * poll(2) bit definitions.  Based on <asm-i386/poll.h>.
+ *
+ * Modified 2004
+ *      Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#define POLLIN		0x0001
+#define POLLPRI		0x0002
+#define POLLOUT		0x0004
+#define POLLERR		0x0008
+#define POLLHUP		0x0010
+#define POLLNVAL	0x0020
+
+#define POLLRDNORM	0x0040
+#define POLLRDBAND	0x0080
+#define POLLWRNORM	0x0100
+#define POLLWRBAND	0x0200
+#define POLLMSG		0x0400
+#define POLLREMOVE	0x1000
+
+struct pollfd {
+	int fd;
+	short events;
+	short revents;
+};
+
+#endif  /* _ASM_M32R_POLL_H */
diff --git a/include/asm-m32r/posix_types.h b/include/asm-m32r/posix_types.h
new file mode 100644
index 0000000..47e7e85
--- /dev/null
+++ b/include/asm-m32r/posix_types.h
@@ -0,0 +1,126 @@
+#ifndef _ASM_M32R_POSIX_TYPES_H
+#define _ASM_M32R_POSIX_TYPES_H
+
+/* $Id$ */
+
+/* orig : i386, sh 2.4.18 */
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long	__kernel_ino_t;
+typedef unsigned short	__kernel_mode_t;
+typedef unsigned short	__kernel_nlink_t;
+typedef long		__kernel_off_t;
+typedef int		__kernel_pid_t;
+typedef unsigned short	__kernel_ipc_pid_t;
+typedef unsigned short	__kernel_uid_t;
+typedef unsigned short	__kernel_gid_t;
+typedef unsigned int	__kernel_size_t;
+typedef int		__kernel_ssize_t;
+typedef int		__kernel_ptrdiff_t;
+typedef long		__kernel_time_t;
+typedef long		__kernel_suseconds_t;
+typedef long		__kernel_clock_t;
+typedef int		__kernel_timer_t;
+typedef int		__kernel_clockid_t;
+typedef int		__kernel_daddr_t;
+typedef char *		__kernel_caddr_t;
+typedef unsigned short	__kernel_uid16_t;
+typedef unsigned short	__kernel_gid16_t;
+typedef unsigned int	__kernel_uid32_t;
+typedef unsigned int	__kernel_gid32_t;
+
+typedef unsigned short	__kernel_old_uid_t;
+typedef unsigned short	__kernel_old_gid_t;
+typedef unsigned short	__kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long	__kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+	int	val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+	int	__val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef	__FD_SET
+static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+#undef	__FD_CLR
+static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+
+#undef	__FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+	unsigned long __tmp = __fd / __NFDBITS;
+	unsigned long __rem = __fd % __NFDBITS;
+	return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
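+
+/*
+ * Example (illustrative): for __fd == 35 and 32-bit longs (__NFDBITS == 32),
+ * __tmp == 1 and __rem == 3, so __FD_SET() sets bit 3 of fds_bits[1] and
+ * __FD_ISSET() tests that same bit.
+ */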
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef	__FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	if (__builtin_constant_p(__FDSET_LONGS)) {
+		switch (__FDSET_LONGS) {
+		case 16:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			__tmp[ 4] = 0; __tmp[ 5] = 0;
+			__tmp[ 6] = 0; __tmp[ 7] = 0;
+			__tmp[ 8] = 0; __tmp[ 9] = 0;
+			__tmp[10] = 0; __tmp[11] = 0;
+			__tmp[12] = 0; __tmp[13] = 0;
+			__tmp[14] = 0; __tmp[15] = 0;
+			return;
+
+		case 8:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			__tmp[ 4] = 0; __tmp[ 5] = 0;
+			__tmp[ 6] = 0; __tmp[ 7] = 0;
+			return;
+
+		case 4:
+			__tmp[ 0] = 0; __tmp[ 1] = 0;
+			__tmp[ 2] = 0; __tmp[ 3] = 0;
+			return;
+		}
+	}
+	__i = __FDSET_LONGS;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
+#endif  /* _ASM_M32R_POSIX_TYPES_H */
diff --git a/include/asm-m32r/processor.h b/include/asm-m32r/processor.h
new file mode 100644
index 0000000..09fd181
--- /dev/null
+++ b/include/asm-m32r/processor.h
@@ -0,0 +1,143 @@
+#ifndef _ASM_M32R_PROCESSOR_H
+#define _ASM_M32R_PROCESSOR_H
+
+/*
+ * include/asm-m32r/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994  Linus Torvalds
+ * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <asm/cache.h>
+#include <asm/ptrace.h>  /* pt_regs */
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ *  CPU type and hardware bug flags. Kept separately for each CPU.
+ *  Members of this structure are referenced in head.S, so think twice
+ *  before touching them. [mj]
+ */
+
+struct cpuinfo_m32r {
+	unsigned long pgtable_cache_sz;
+	unsigned long cpu_clock;
+	unsigned long bus_clock;
+	unsigned long timer_divide;
+	unsigned long loops_per_jiffy;
+};
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_m32r boot_cpu_data;
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_m32r cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+/*
+ * User space process size: 2GB (default).
+ */
+#ifdef CONFIG_MMU
+#define TASK_SIZE  (0x80000000UL)
+#else
+#define TASK_SIZE  (0x00400000UL)
+#endif
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+#define MAX_TRAPS 10
+
+struct debug_trap {
+	int nr_trap;
+	unsigned long	addr[MAX_TRAPS];
+	unsigned long	insn[MAX_TRAPS];
+};
+
+struct thread_struct {
+	unsigned long address;
+	unsigned long trap_no;		/* Trap number  */
+	unsigned long error_code;	/* Error code of trap */
+	unsigned long lr;		/* saved pc */
+	unsigned long sp;		/* user stack pointer */
+	struct debug_trap debug_trap;
+};
+
+#define INIT_SP	(sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD	{	\
+	.sp = INIT_SP,		\
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+
+/* User process Backup PSW */
+#define USERPS_BPSW (M32R_PSW_BSM|M32R_PSW_BIE|M32R_PSW_BPM)
+
+#define start_thread(regs, new_pc, new_spu) 				\
+	do {								\
+		set_fs(USER_DS); 					\
+		regs->psw = (regs->psw | USERPS_BPSW) & 0x0000FFFFUL;	\
+		regs->bpc = new_pc;					\
+		regs->spu = new_spu;					\
+	} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+#define prepare_to_copy(tsk)	do { } while (0)
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
+extern void release_segments(struct mm_struct * mm);
+
+extern unsigned long thread_saved_pc(struct task_struct *);
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(p, mm)  do { } while (0)
+#define release_segments(mm)  do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+#define KSTK_EIP(tsk)  ((tsk)->thread.lr)
+#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
+
+#define THREAD_SIZE (2*PAGE_SIZE)
+
+#define cpu_relax()	barrier()
+
+#endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
new file mode 100644
index 0000000..9764171
--- /dev/null
+++ b/include/asm-m32r/ptrace.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_M32R_PTRACE_H
+#define _ASM_M32R_PTRACE_H
+
+/*
+ * linux/include/asm-m32r/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * M32R version:
+ *   Copyright (C) 2001-2002, 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>		/* M32R_PSW_BSM, M32R_PSW_BPM */
+
+/* 0 - 13 are integer registers (general purpose registers).  */
+#define PT_R4		0
+#define PT_R5		1
+#define PT_R6		2
+#define PT_REGS 	3
+#define PT_R0		4
+#define PT_R1		5
+#define PT_R2		6
+#define PT_R3		7
+#define PT_R7		8
+#define PT_R8		9
+#define PT_R9		10
+#define PT_R10		11
+#define PT_R11		12
+#define PT_R12		13
+#define PT_SYSCNR	14
+#define PT_R13		PT_FP
+#define PT_R14		PT_LR
+#define PT_R15		PT_SP
+
+/* processor status and miscellaneous context registers.  */
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+#define PT_ACC0H	15
+#define PT_ACC0L	16
+#define PT_ACC1H	17
+#define PT_ACC1L	18
+#define PT_ACCH		PT_ACC0H
+#define PT_ACCL		PT_ACC0L
+#define PT_PSW		19
+#define PT_BPC		20
+#define PT_BBPSW	21
+#define PT_BBPC		22
+#define PT_SPU		23
+#define PT_FP		24
+#define PT_LR		25
+#define PT_SPI		26
+#define PT_ORIGR0	27
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+#define PT_ACCH		15
+#define PT_ACCL		16
+#define PT_PSW		17
+#define PT_BPC		18
+#define PT_BBPSW	19
+#define PT_BBPC		20
+#define PT_SPU		21
+#define PT_FP		22
+#define PT_LR		23
+#define PT_SPI		24
+#define PT_ORIGR0	25
+#else
+#error unknown isa configuration
+#endif
+
+/* virtual pt_reg entry for gdb */
+#define PT_PC		30
+#define PT_CBR		31
+#define PT_EVB		32
+
+
+/* Control registers.  */
+#define SPR_CR0 PT_PSW
+#define SPR_CR1 PT_CBR		/* read only */
+#define SPR_CR2 PT_SPI
+#define SPR_CR3 PT_SPU
+#define SPR_CR4
+#define SPR_CR5 PT_EVB		/* part of M32R/E, M32R/I core only */
+#define SPR_CR6 PT_BPC
+#define SPR_CR7
+#define SPR_CR8 PT_BBPSW
+#define SPR_CR9
+#define SPR_CR10
+#define SPR_CR11
+#define SPR_CR12
+#define SPR_CR13 PT_WR
+#define SPR_CR14 PT_BBPC
+#define SPR_CR15
+
+/* this struct defines the way the registers are stored on the
+   stack during a system call. */
+struct pt_regs {
+	/* Saved main processor registers. */
+	unsigned long r4;
+	unsigned long r5;
+	unsigned long r6;
+	struct pt_regs *pt_regs;
+	unsigned long r0;
+	unsigned long r1;
+	unsigned long r2;
+	unsigned long r3;
+	unsigned long r7;
+	unsigned long r8;
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	long syscall_nr;
+
+	/* Saved main processor status and miscellaneous context registers. */
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+	unsigned long acc0h;
+	unsigned long acc0l;
+	unsigned long acc1h;
+	unsigned long acc1l;
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+	unsigned long acch;
+	unsigned long accl;
+#else
+#error unknown isa configuration
+#endif
+	unsigned long psw;
+	unsigned long bpc;		/* saved PC for TRAP syscalls */
+	unsigned long bbpsw;
+	unsigned long bbpc;
+	unsigned long spu;		/* saved user stack */
+	unsigned long fp;
+	unsigned long lr;		/* saved PC for JL syscalls */
+	unsigned long spi;		/* saved kernel stack */
+	unsigned long orig_r0;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+
+#define PTRACE_OLDSETOPTIONS	21
+
+/* options set using PTRACE_SETOPTIONS */
+#define PTRACE_O_TRACESYSGOOD	0x00000001
+
+#ifdef __KERNEL__
+#if defined(CONFIG_ISA_M32R2) || defined(CONFIG_CHIP_VDEC2)
+#define user_mode(regs) ((M32R_PSW_BPM & (regs)->psw) != 0)
+#elif defined(CONFIG_ISA_M32R)
+#define user_mode(regs) ((M32R_PSW_BSM & (regs)->psw) != 0)
+#else
+#error unknown isa configuration
+#endif
+
+#define instruction_pointer(regs) ((regs)->bpc)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern void show_regs(struct pt_regs *);
+
+extern void withdraw_debug_trap(struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_PTRACE_H */
diff --git a/include/asm-m32r/resource.h b/include/asm-m32r/resource.h
new file mode 100644
index 0000000..b1ce766
--- /dev/null
+++ b/include/asm-m32r/resource.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_M32R_RESOURCE_H
+#define _ASM_M32R_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif  /* _ASM_M32R_RESOURCE_H */
diff --git a/include/asm-m32r/rtc.h b/include/asm-m32r/rtc.h
new file mode 100644
index 0000000..ec3cdf6
--- /dev/null
+++ b/include/asm-m32r/rtc.h
@@ -0,0 +1,70 @@
+/* $Id: rtc.h,v 1.1.1.1 2004/03/25 04:29:22 hitoshiy Exp $ */
+
+#ifndef __RTC_H__
+#define __RTC_H__
+
+
+#include <linux/config.h>
+
+   /* Dallas DS1302 clock/calendar register numbers. */
+#  define RTC_SECONDS      0
+#  define RTC_MINUTES      1
+#  define RTC_HOURS        2
+#  define RTC_DAY_OF_MONTH 3
+#  define RTC_MONTH        4
+#  define RTC_WEEKDAY      5
+#  define RTC_YEAR         6
+#  define RTC_CONTROL      7
+
+   /* Bits in CONTROL register. */
+#  define RTC_CONTROL_WRITEPROTECT 	0x80
+#  define RTC_TRICKLECHARGER 		8
+
+  /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
+#  define RTC_TCR_PATTERN 	0xA0	/* 1010xxxx */
+#  define RTC_TCR_1DIOD 	0x04	/* xxxx01xx */
+#  define RTC_TCR_2DIOD 	0x08	/* xxxx10xx */
+#  define RTC_TCR_DISABLED 	0x00	/* xxxxxx00 Disabled */
+#  define RTC_TCR_2KOHM 	0x01	/* xxxxxx01 2KOhm */
+#  define RTC_TCR_4KOHM 	0x02	/* xxxxxx10 4kOhm */
+#  define RTC_TCR_8KOHM 	0x03	/* xxxxxx11 8kOhm */
+
+#ifdef CONFIG_DS1302
+extern unsigned char ds1302_readreg(int reg);
+extern void ds1302_writereg(int reg, unsigned char val);
+extern int ds1302_init(void);
+#  define CMOS_READ(x) ds1302_readreg(x)
+#  define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
+#  define RTC_INIT() ds1302_init()
+#else
+  /* No RTC configured so we shouldn't try to access any. */
+#  define CMOS_READ(x) 42
+#  define CMOS_WRITE(x,y)
+#  define RTC_INIT() (-1)
+#endif
+
+/*
+ * The struct used to pass data via the following ioctl. Similar to the
+ * struct tm in <time.h>, but it needs to be here so that the kernel
+ * source is self contained, allowing cross-compiles, etc. etc.
+ */
+struct rtc_time {
+	int tm_sec;
+	int tm_min;
+	int tm_hour;
+	int tm_mday;
+	int tm_mon;
+	int tm_year;
+	int tm_wday;
+	int tm_yday;
+	int tm_isdst;
+};
+
+/* ioctl() calls that are permitted to the /dev/rtc interface. */
+#define RTC_MAGIC 'p'
+#define RTC_RD_TIME		_IOR(RTC_MAGIC, 0x09, struct rtc_time)	/* Read RTC time. */
+#define RTC_SET_TIME		_IOW(RTC_MAGIC, 0x0a, struct rtc_time)	/* Set RTC time. */
+#define RTC_SET_CHARGE  	_IOW(RTC_MAGIC, 0x0b, int)
+#define RTC_MAX_IOCTL 0x0b
+
+#endif /* __RTC_H__ */
diff --git a/include/asm-m32r/scatterlist.h b/include/asm-m32r/scatterlist.h
new file mode 100644
index 0000000..09a10e4
--- /dev/null
+++ b/include/asm-m32r/scatterlist.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_M32R_SCATTERLIST_H
+#define _ASM_M32R_SCATTERLIST_H
+
+/* $Id$ */
+
+struct scatterlist {
+    char *  address;    /* Location data is to be transferred to, NULL for
+                         * highmem page */
+    struct page * page; /* Location for highmem page, if any */
+    unsigned int offset;/* for highmem, page offset */
+
+    dma_addr_t dma_address;
+    unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0x1fffffff)
+
+#endif /* _ASM_M32R_SCATTERLIST_H */
diff --git a/include/asm-m32r/sections.h b/include/asm-m32r/sections.h
new file mode 100644
index 0000000..6b969e5
--- /dev/null
+++ b/include/asm-m32r/sections.h
@@ -0,0 +1,8 @@
+#ifndef _M32R_SECTIONS_H
+#define _M32R_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#endif	/* _M32R_SECTIONS_H */
+
diff --git a/include/asm-m32r/segment.h b/include/asm-m32r/segment.h
new file mode 100644
index 0000000..e45db68
--- /dev/null
+++ b/include/asm-m32r/segment.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_M32R_SEGMENT_H
+#define _ASM_M32R_SEGMENT_H
+
+/* $Id$ */
+
+/* orig : i386 (2.4.18) */
+
+#define __KERNEL_CS	0x10
+#define __KERNEL_DS	0x18
+
+#define __USER_CS	0x23
+#define __USER_DS	0x2B
+
+#endif  /* _ASM_M32R_SEGMENT_H */
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
new file mode 100644
index 0000000..53e3c60
--- /dev/null
+++ b/include/asm-m32r/semaphore.h
@@ -0,0 +1,205 @@
+#ifndef _ASM_M32R_SEMAPHORE_H
+#define _ASM_M32R_SEMAPHORE_H
+
+#include <linux/linkage.h>
+
+#ifdef __KERNEL__
+
+/*
+ * SMP- and interrupt-safe semaphores..
+ *
+ * Copyright (C) 1996  Linus Torvalds
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <asm/assembler.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+struct semaphore {
+	atomic_t count;
+	int sleepers;
+	wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n)				\
+{									\
+	.count		= ATOMIC_INIT(n),				\
+	.sleepers	= 0,						\
+	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
+}
+
+#define __MUTEX_INITIALIZER(name) \
+	__SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * I'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+	atomic_set(&sem->count, val);
+	sem->sleepers = 0;
+	init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+	sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+	sema_init(sem, 0);
+}
+
+asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
+asmlinkage int  __down_failed_trylock(void  /* params in registers */);
+asmlinkage void __up_wakeup(void /* special register calling convention */);
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int  __down_interruptible(struct semaphore * sem);
+asmlinkage int  __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+/*
+ * Atomically decrement the semaphore's count.  If it goes negative,
+ * block the calling thread in the TASK_UNINTERRUPTIBLE state.
+ */
+static inline void down(struct semaphore * sem)
+{
+	unsigned long flags;
+	long count;
+
+	might_sleep();
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# down				\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (count)
+		: "r" (&sem->count)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	if (unlikely(count < 0))
+		__down(sem);
+}
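+
+/*
+ * Example (illustrative) of the usual calling pattern; "my_sem" is a
+ * hypothetical name used only for this sketch:
+ *
+ *	static DECLARE_MUTEX(my_sem);
+ *
+ *	down(&my_sem);		(may sleep, so never from interrupt context)
+ *	... critical section ...
+ *	up(&my_sem);
+ */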
+
+/*
+ * Interruptibly try to acquire a semaphore.  If we obtain
+ * it, return zero.  If we are interrupted, return -EINTR.
+ */
+static inline int down_interruptible(struct semaphore * sem)
+{
+	unsigned long flags;
+	long count;
+	int result = 0;
+
+	might_sleep();
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# down_interruptible		\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (count)
+		: "r" (&sem->count)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	if (unlikely(count < 0))
+		result = __down_interruptible(sem);
+
+	return result;
+}
+
+/*
+ * Attempt to down() a semaphore without blocking.
+ * Returns zero if we acquired it.
+ */
+static inline int down_trylock(struct semaphore * sem)
+{
+	unsigned long flags;
+	long count;
+	int result = 0;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# down_trylock			\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (count)
+		: "r" (&sem->count)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	if (unlikely(count < 0))
+		result = __down_trylock(sem);
+
+	return result;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+static inline void up(struct semaphore * sem)
+{
+	unsigned long flags;
+	long count;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+		"# up				\n\t"
+		DCACHE_CLEAR("%0", "r4", "%1")
+		M32R_LOCK" %0, @%1;		\n\t"
+		"addi	%0, #1;			\n\t"
+		M32R_UNLOCK" %0, @%1;		\n\t"
+		: "=&r" (count)
+		: "r" (&sem->count)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+	local_irq_restore(flags);
+
+	if (unlikely(count <= 0))
+		__up(sem);
+}
+
+#endif  /* __KERNEL__ */
+
+#endif  /* _ASM_M32R_SEMAPHORE_H */
diff --git a/include/asm-m32r/sembuf.h b/include/asm-m32r/sembuf.h
new file mode 100644
index 0000000..e69018e
--- /dev/null
+++ b/include/asm-m32r/sembuf.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_M32R_SEMBUF_H
+#define _ASM_M32R_SEMBUF_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * The semid64_ds structure for m32r architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	unsigned long	__unused1;
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	__unused2;
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ASM_M32R_SEMBUF_H */
diff --git a/include/asm-m32r/serial.h b/include/asm-m32r/serial.h
new file mode 100644
index 0000000..1bf480f
--- /dev/null
+++ b/include/asm-m32r/serial.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_M32R_SERIAL_H
+#define _ASM_M32R_SERIAL_H
+
+/* include/asm-m32r/serial.h */
+
+#include <linux/config.h>
+
+#define BASE_BAUD	115200
+
+#endif  /* _ASM_M32R_SERIAL_H */
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
new file mode 100644
index 0000000..5f028dc
--- /dev/null
+++ b/include/asm-m32r/setup.h
@@ -0,0 +1,33 @@
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+#define PARAM			((unsigned char *)empty_zero_page)
+
+#define MOUNT_ROOT_RDONLY	(*(unsigned long *) (PARAM+0x000))
+#define RAMDISK_FLAGS		(*(unsigned long *) (PARAM+0x004))
+#define ORIG_ROOT_DEV		(*(unsigned long *) (PARAM+0x008))
+#define LOADER_TYPE		(*(unsigned long *) (PARAM+0x00c))
+#define INITRD_START		(*(unsigned long *) (PARAM+0x010))
+#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x014))
+
+#define M32R_CPUCLK		(*(unsigned long *) (PARAM+0x018))
+#define M32R_BUSCLK		(*(unsigned long *) (PARAM+0x01c))
+#define M32R_TIMER_DIVIDE	(*(unsigned long *) (PARAM+0x020))
+
+#define COMMAND_LINE		((char *) (PARAM+0x100))
+
+#define SCREEN_INFO		(*(struct screen_info *) (PARAM+0x200))
+
+#define COMMAND_LINE_SIZE	(512)
+
+#define RAMDISK_IMAGE_START_MASK	(0x07FF)
+#define RAMDISK_PROMPT_FLAG		(0x8000)
+#define RAMDISK_LOAD_FLAG		(0x4000)
+
+#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
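+
+/*
+ * Example (illustrative): with 4KB pages, PFN_UP(0x1234) == 2,
+ * PFN_DOWN(0x1234) == 1 and PFN_PHYS(2) == 0x2000.
+ */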
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
diff --git a/include/asm-m32r/shmbuf.h b/include/asm-m32r/shmbuf.h
new file mode 100644
index 0000000..b84e897
--- /dev/null
+++ b/include/asm-m32r/shmbuf.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_M32R_SHMBUF_H
+#define _ASM_M32R_SHMBUF_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+/*
+ * The shmid64_ds structure for M32R architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	unsigned long		__unused1;
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	unsigned long		__unused2;
+	__kernel_time_t		shm_ctime;	/* last change time */
+	unsigned long		__unused3;
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused4;
+	unsigned long		__unused5;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ASM_M32R_SHMBUF_H */
diff --git a/include/asm-m32r/shmparam.h b/include/asm-m32r/shmparam.h
new file mode 100644
index 0000000..db0019b
--- /dev/null
+++ b/include/asm-m32r/shmparam.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_M32R_SHMPARAM_H
+#define _ASM_M32R_SHMPARAM_H
+
+/* $Id$ */
+
+#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */
+
+#endif /* _ASM_M32R_SHMPARAM_H */
diff --git a/include/asm-m32r/sigcontext.h b/include/asm-m32r/sigcontext.h
new file mode 100644
index 0000000..c233e2d
--- /dev/null
+++ b/include/asm-m32r/sigcontext.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_M32R_SIGCONTEXT_H
+#define _ASM_M32R_SIGCONTEXT_H
+
+/* $Id$ */
+
+#include <linux/config.h>
+
+struct sigcontext {
+	/* CPU registers */
+	/* Saved main processor registers. */
+	unsigned long sc_r4;
+	unsigned long sc_r5;
+	unsigned long sc_r6;
+	struct pt_regs *sc_pt_regs;
+	unsigned long sc_r0;
+	unsigned long sc_r1;
+	unsigned long sc_r2;
+	unsigned long sc_r3;
+	unsigned long sc_r7;
+	unsigned long sc_r8;
+	unsigned long sc_r9;
+	unsigned long sc_r10;
+	unsigned long sc_r11;
+	unsigned long sc_r12;
+
+	/* Saved main processor status and miscellaneous context registers. */
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+	unsigned long sc_acc0h;
+	unsigned long sc_acc0l;
+	unsigned long sc_acc1h;
+	unsigned long sc_acc1l;
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+	unsigned long sc_acch;
+	unsigned long sc_accl;
+#else
+#error unknown isa configuration
+#endif
+	unsigned long sc_psw;
+	unsigned long sc_bpc;		/* saved PC for TRAP syscalls */
+	unsigned long sc_bbpsw;
+	unsigned long sc_bbpc;
+	unsigned long sc_spu;		/* saved user stack */
+	unsigned long sc_fp;
+	unsigned long sc_lr;		/* saved PC for JL syscalls */
+	unsigned long sc_spi;		/* saved kernel stack */
+
+	unsigned long	oldmask;
+};
+
+#endif  /* _ASM_M32R_SIGCONTEXT_H */
diff --git a/include/asm-m32r/siginfo.h b/include/asm-m32r/siginfo.h
new file mode 100644
index 0000000..482202f
--- /dev/null
+++ b/include/asm-m32r/siginfo.h
@@ -0,0 +1,8 @@
+#ifndef _M32R_SIGINFO_H
+#define _M32R_SIGINFO_H
+
+/* $Id$ */
+
+#include <asm-generic/siginfo.h>
+
+#endif /* _M32R_SIGINFO_H */
diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h
new file mode 100644
index 0000000..ce46eae
--- /dev/null
+++ b/include/asm-m32r/signal.h
@@ -0,0 +1,200 @@
+#ifndef _ASM_M32R_SIGNAL_H
+#define _ASM_M32R_SIGNAL_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/time.h>
+#include <linux/compiler.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+#ifdef __KERNEL__
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+#define _NSIG		64
+#define _NSIG_BPW	32
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+#define NSIG		32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGBUS		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGUSR1		10
+#define SIGSEGV		11
+#define SIGUSR2		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGSTKFLT	16
+#define SIGCHLD		17
+#define SIGCONT		18
+#define SIGSTOP		19
+#define SIGTSTP		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGURG		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGIO		29
+#define SIGPOLL		SIGIO
+/*
+#define SIGLOST		29
+*/
+#define SIGPWR		30
+#define SIGSYS		31
+#define	SIGUNUSED	31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP	0x00000001u
+#define SA_NOCLDWAIT	0x00000002u
+#define SA_SIGINFO	0x00000004u
+#define SA_ONSTACK	0x08000000u
+#define SA_RESTART	0x10000000u
+#define SA_NODEFER	0x40000000u
+#define SA_RESETHAND	0x80000000u
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER	0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK	1
+#define SS_DISABLE	2
+
+#define MINSIGSTKSZ	2048
+#define SIGSTKSZ	8192
+
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE		SA_ONESHOT
+#define SA_SAMPLE_RANDOM	SA_RESTART
+#define SA_SHIRQ		0x04000000
+#endif
+
+#define SIG_BLOCK          0	/* for blocking signals */
+#define SIG_UNBLOCK        1	/* for unblocking signals */
+#define SIG_SETMASK        2	/* for setting the signal mask */
+
+/* Type of a signal handler.  */
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
+#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
+#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+	__sighandler_t sa_handler;
+	old_sigset_t sa_mask;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+};
+
+struct sigaction {
+	__sighandler_t sa_handler;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+	sigset_t sa_mask;		/* mask last for extensibility */
+};
+
+struct k_sigaction {
+	struct sigaction sa;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+struct sigaction {
+	union {
+	  __sighandler_t _sa_handler;
+	  void (*_sa_sigaction)(int, struct siginfo *, void *);
+	} _u;
+	sigset_t sa_mask;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+};
+
+#define sa_handler	_u._sa_handler
+#define sa_sigaction	_u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+
+#undef __HAVE_ARCH_SIG_BITOPS
+
+struct pt_regs;
+extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
+#define ptrace_signal_deliver(regs, cookie)	do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif  /* _ASM_M32R_SIGNAL_H */
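
The SA_* flags above are what userspace hands to sigaction(2). As a hedged illustration of how they are typically consumed (ordinary POSIX application code, not kernel code), a handler registered with SA_SIGINFO and SA_RESTART looks like this:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_sigusr1(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;
	/* only async-signal-safe calls belong in a handler; write(2) is safe */
	(void)write(STDOUT_FILENO, "got SIGUSR1\n", 12);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigusr1;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* restart interrupted syscalls */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	pause();				/* wait until a signal arrives */
	return 0;
}
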
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
new file mode 100644
index 0000000..8cd4d0d
--- /dev/null
+++ b/include/asm-m32r/smp.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_M32R_SMP_H
+#define _ASM_M32R_SMP_H
+
+/* $Id$ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <asm/m32r.h>
+
+#define PHYSID_ARRAY_SIZE       1
+
+struct physid_mask
+{
+	unsigned long mask[PHYSID_ARRAY_SIZE];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+#define physid_set(physid, map)                 set_bit(physid, (map).mask)
+#define physid_clear(physid, map)               clear_bit(physid, (map).mask)
+#define physid_isset(physid, map)               test_bit(physid, (map).mask)
+#define physid_test_and_set(physid, map)        test_and_set_bit(physid, (map).mask)
+
+#define physids_and(dst, src1, src2)            bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_or(dst, src1, src2)             bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_clear(map)                      bitmap_zero((map).mask, MAX_APICS)
+#define physids_complement(dst, src)            bitmap_complement((dst).mask,(src).mask, MAX_APICS)
+#define physids_empty(map)                      bitmap_empty((map).mask, MAX_APICS)
+#define physids_equal(map1, map2)               bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+#define physids_weight(map)                     bitmap_weight((map).mask, MAX_APICS)
+#define physids_shift_right(d, s, n)            bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+#define physids_shift_left(d, s, n)             bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+#define physids_coerce(map)                     ((map).mask[0])
+
+#define physids_promote(physids)					\
+	({								\
+		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
+		__physid_mask.mask[0] = physids;			\
+		__physid_mask;						\
+	})
+
+#define physid_mask_of_physid(physid)					\
+	({								\
+		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
+		physid_set(physid, __physid_mask);			\
+		__physid_mask;						\
+	})
+
+#define PHYSID_MASK_ALL         { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE        { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
+
+/*
+ * Some lowlevel functions might want to know about
+ * the real CPU ID <-> CPU # mapping.
+ */
+extern volatile int physid_2_cpu[NR_CPUS];
+extern volatile int cpu_2_physid[NR_CPUS];
+#define physid_to_cpu(physid)	physid_2_cpu[physid]
+#define cpu_to_physid(cpu_id)	cpu_2_physid[cpu_id]
+
+#define smp_processor_id()	(current_thread_info()->cpu)
+
+extern cpumask_t cpu_callout_map;
+#define cpu_possible_map cpu_callout_map
+
+static __inline__ int hard_smp_processor_id(void)
+{
+	return (int)*(volatile long *)M32R_CPUID_PORTL;
+}
+
+static __inline__ int cpu_logical_map(int cpu)
+{
+	return cpu;
+}
+
+static __inline__ int cpu_number_map(int cpu)
+{
+	return cpu;
+}
+
+static __inline__ unsigned int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+
+extern void smp_send_timer(void);
+extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+
+#endif	/* not __ASSEMBLY__ */
+
+#define NO_PROC_ID (0xff)	/* No processor magic marker */
+
+#define PROC_CHANGE_PENALTY	(15)	/* Schedule penalty */
+
+/*
+ * M32R-mp IPI
+ */
+#define RESCHEDULE_IPI		(M32R_IRQ_IPI0-M32R_IRQ_IPI0)
+#define INVALIDATE_TLB_IPI	(M32R_IRQ_IPI1-M32R_IRQ_IPI0)
+#define CALL_FUNCTION_IPI	(M32R_IRQ_IPI2-M32R_IRQ_IPI0)
+#define LOCAL_TIMER_IPI		(M32R_IRQ_IPI3-M32R_IRQ_IPI0)
+#define INVALIDATE_CACHE_IPI	(M32R_IRQ_IPI4-M32R_IRQ_IPI0)
+#define CPU_BOOT_IPI		(M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+
+#define IPI_SHIFT	(0)
+#define NR_IPIS		(8)
+
+#endif	/* CONFIG_SMP */
+
+#endif	/* _ASM_M32R_SMP_H */
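
physid_mask_t above is a one-word bitmap of physical CPU IDs (PHYSID_ARRAY_SIZE is 1), and physid_mask_of_physid() produces a mask with exactly one bit set. A standalone sketch of the same idea, using plain C bit operations in place of the kernel's set_bit()/test_bit() helpers:

#include <stdio.h>

typedef struct { unsigned long mask[1]; } toy_physid_mask_t;

static void toy_physid_set(int physid, toy_physid_mask_t *map)
{
	map->mask[0] |= 1UL << physid;
}

static int toy_physid_isset(int physid, const toy_physid_mask_t *map)
{
	return (map->mask[0] >> physid) & 1UL;
}

int main(void)
{
	toy_physid_mask_t map = { { 0UL } };	/* the empty mask */

	toy_physid_set(2, &map);		/* mark physical CPU 2 present */
	printf("cpu2 present: %d, cpu3 present: %d\n",
	       toy_physid_isset(2, &map), toy_physid_isset(3, &map));
	return 0;
}
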
diff --git a/include/asm-m32r/socket.h b/include/asm-m32r/socket.h
new file mode 100644
index 0000000..159519d
--- /dev/null
+++ b/include/asm-m32r/socket.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_M32R_SOCKET_H
+#define _ASM_M32R_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET	1
+
+#define SO_DEBUG	1
+#define SO_REUSEADDR	2
+#define SO_TYPE		3
+#define SO_ERROR	4
+#define SO_DONTROUTE	5
+#define SO_BROADCAST	6
+#define SO_SNDBUF	7
+#define SO_RCVBUF	8
+#define SO_KEEPALIVE	9
+#define SO_OOBINLINE	10
+#define SO_NO_CHECK	11
+#define SO_PRIORITY	12
+#define SO_LINGER	13
+#define SO_BSDCOMPAT	14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_PASSCRED	16
+#define SO_PEERCRED	17
+#define SO_RCVLOWAT	18
+#define SO_SNDLOWAT	19
+#define SO_RCVTIMEO	20
+#define SO_SNDTIMEO	21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
+#define SO_SECURITY_ENCRYPTION_NETWORK		24
+
+#define SO_BINDTODEVICE	25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER        26
+#define SO_DETACH_FILTER        27
+
+#define SO_PEERNAME		28
+#define SO_TIMESTAMP		29
+#define SCM_TIMESTAMP		SO_TIMESTAMP
+
+#define SO_ACCEPTCONN		30
+
+#define SO_PEERSEC		31
+
+#endif /* _ASM_M32R_SOCKET_H */
diff --git a/include/asm-m32r/sockios.h b/include/asm-m32r/sockios.h
new file mode 100644
index 0000000..147a118
--- /dev/null
+++ b/include/asm-m32r/sockios.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_M32R_SOCKIOS_H
+#define _ASM_M32R_SOCKIOS_H
+
+/* $Id$ */
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 	0x8901
+#define SIOCSPGRP	0x8902
+#define FIOGETOWN	0x8903
+#define SIOCGPGRP	0x8904
+#define SIOCATMARK	0x8905
+#define SIOCGSTAMP	0x8906		/* Get stamp */
+
+#endif  /* _ASM_M32R_SOCKIOS_H */
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
new file mode 100644
index 0000000..6608d83
--- /dev/null
+++ b/include/asm-m32r/spinlock.h
@@ -0,0 +1,380 @@
+#ifndef _ASM_M32R_SPINLOCK_H
+#define _ASM_M32R_SPINLOCK_H
+
+/*
+ *  linux/include/asm-m32r/spinlock.h
+ *
+ *  M32R version:
+ *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
+ *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/page.h>
+
+extern int printk(const char * fmt, ...)
+	__attribute__ ((format (printf, 1, 2)));
+
+#define RW_LOCK_BIAS		 0x01000000
+#define RW_LOCK_BIAS_STR	"0x01000000"
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+	volatile int slock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC	0xdead4ead
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
+#else
+#define SPINLOCK_MAGIC_INIT	/* */
+#endif
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
+
+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQs
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions; fairness has a cost.
+ */
+
+#define spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+
+/**
+ * _raw_spin_trylock - Try spin lock and return a result
+ * @lock: Pointer to the lock variable
+ *
+ * _raw_spin_trylock() tries to get the lock and returns a result.
+ * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
+ */
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	int oldval;
+	unsigned long tmp1, tmp2;
+
+	/*
+	 * lock->slock :  =1 : unlock
+	 *             : <=0 : lock
+	 * {
+	 *   oldval = lock->slock; <--+ need atomic operation
+	 *   lock->slock = 0;      <--+
+	 * }
+	 */
+	__asm__ __volatile__ (
+		"# spin_trylock			\n\t"
+		"ldi	%1, #0;			\n\t"
+		"mvfc	%2, psw;		\n\t"
+		"clrpsw	#0x40 -> nop;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%3")
+		"lock	%0, @%3;		\n\t"
+		"unlock	%1, @%3;		\n\t"
+		"mvtc	%2, psw;		\n\t"
+		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
+		: "r" (&lock->slock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+
+	return (oldval > 0);
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+	unsigned long tmp0, tmp1;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("pc: %p\n", __builtin_return_address(0));
+		BUG();
+	}
+#endif
+	/*
+	 * lock->slock :  =1 : unlock
+	 *             : <=0 : lock
+	 *
+	 * for ( ; ; ) {
+	 *   lock->slock -= 1;  <-- need atomic operation
+	 *   if (lock->slock == 0) break;
+	 *   for ( ; lock->slock <= 0 ; );
+	 * }
+	 */
+	__asm__ __volatile__ (
+		"# spin_lock			\n\t"
+		".fillinsn			\n"
+		"1:				\n\t"
+		"mvfc	%1, psw;		\n\t"
+		"clrpsw	#0x40 -> nop;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		"bltz	%0, 2f;			\n\t"
+		LOCK_SECTION_START(".balign 4 \n\t")
+		".fillinsn			\n"
+		"2:				\n\t"
+		"ld	%0, @%2;		\n\t"
+		"bgtz	%0, 1b;			\n\t"
+		"bra	2b;			\n\t"
+		LOCK_SECTION_END
+		: "=&r" (tmp0), "=&r" (tmp1)
+		: "r" (&lock->slock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+}
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
+#endif
+	mb();
+	lock->slock = 1;
+}
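
The comments above describe the protocol: slock is 1 when the lock is free; a CPU acquires it by atomically decrementing and checking for a non-negative result, otherwise it spins until the value looks positive again, and unlock simply stores 1. A portable C sketch of that protocol using GCC/Clang __atomic builtins; this is illustrative only, since the real code must use the m32r LOCK/UNLOCK instructions and PSW manipulation shown above:

#include <stdio.h>

typedef struct { volatile int slock; } toy_spinlock_t;	/* 1 = unlocked */

static int toy_trylock(toy_spinlock_t *lock)
{
	/* atomically fetch the old value and store 0; success if it was 1 */
	return __atomic_exchange_n(&lock->slock, 0, __ATOMIC_ACQUIRE) > 0;
}

static void toy_lock(toy_spinlock_t *lock)
{
	for (;;) {
		/* atomic decrement: the new value is 0 exactly for the winner */
		if (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) == 0)
			return;
		while (lock->slock <= 0)
			;			/* spin until it looks free again */
	}
}

static void toy_unlock(toy_spinlock_t *lock)
{
	__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	toy_spinlock_t lock = { 1 };

	toy_lock(&lock);
	printf("locked; trylock now returns %d\n", toy_trylock(&lock));
	toy_unlock(&lock);
	printf("unlocked; trylock now returns %d\n", toy_trylock(&lock));
	return 0;
}
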
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+	volatile int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC	0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
+#else
+#define RWLOCK_MAGIC_INIT	/* */
+#endif
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+
+#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * As on x86, read-write locks are implemented as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
+ */
+/* the spinlock helpers are in this architecture's kernel/semaphore.c */
+
+static inline void _raw_read_lock(rwlock_t *rw)
+{
+	unsigned long tmp0, tmp1;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	/*
+	 * rw->lock :  >0 : unlock
+	 *          : <=0 : lock
+	 *
+	 * for ( ; ; ) {
+	 *   rw->lock -= 1;  <-- need atomic operation
+	 *   if (rw->lock >= 0) break;
+	 *   rw->lock += 1;  <-- need atomic operation
+	 *   for ( ; rw->lock <= 0 ; );
+	 * }
+	 */
+	__asm__ __volatile__ (
+		"# read_lock			\n\t"
+		".fillinsn			\n"
+		"1:				\n\t"
+		"mvfc	%1, psw;		\n\t"
+		"clrpsw	#0x40 -> nop;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #-1;		\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		"bltz	%0, 2f;			\n\t"
+		LOCK_SECTION_START(".balign 4 \n\t")
+		".fillinsn			\n"
+		"2:				\n\t"
+		"clrpsw	#0x40 -> nop;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #1;			\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		".fillinsn			\n"
+		"3:				\n\t"
+		"ld	%0, @%2;		\n\t"
+		"bgtz	%0, 1b;			\n\t"
+		"bra	3b;			\n\t"
+		LOCK_SECTION_END
+		: "=&r" (tmp0), "=&r" (tmp1)
+		: "r" (&rw->lock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+}
+
+static inline void _raw_write_lock(rwlock_t *rw)
+{
+	unsigned long tmp0, tmp1, tmp2;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	/*
+	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
+	 *          : !=RW_LOCK_BIAS_STR : lock
+	 *
+	 * for ( ; ; ) {
+	 *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
+	 *   if (rw->lock == 0) break;
+	 *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
+	 *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
+	 * }
+	 */
+	__asm__ __volatile__ (
+		"# write_lock					\n\t"
+		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
+		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
+		".fillinsn					\n"
+		"1:						\n\t"
+		"mvfc	%2, psw;				\n\t"
+		"clrpsw	#0x40 -> nop;				\n\t"
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"sub	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
+		"bnez	%0, 2f;					\n\t"
+		LOCK_SECTION_START(".balign 4 \n\t")
+		".fillinsn					\n"
+		"2:						\n\t"
+		"clrpsw	#0x40 -> nop;				\n\t"
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"add	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
+		".fillinsn					\n"
+		"3:						\n\t"
+		"ld	%0, @%3;				\n\t"
+		"beq	%0, %1, 1b;				\n\t"
+		"bra	3b;					\n\t"
+		LOCK_SECTION_END
+		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
+		: "r" (&rw->lock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r7"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+}
+
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+	unsigned long tmp0, tmp1;
+
+	__asm__ __volatile__ (
+		"# read_unlock			\n\t"
+		"mvfc	%1, psw;		\n\t"
+		"clrpsw	#0x40 -> nop;		\n\t"
+		DCACHE_CLEAR("%0", "r6", "%2")
+		"lock	%0, @%2;		\n\t"
+		"addi	%0, #1;			\n\t"
+		"unlock	%0, @%2;		\n\t"
+		"mvtc	%1, psw;		\n\t"
+		: "=&r" (tmp0), "=&r" (tmp1)
+		: "r" (&rw->lock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r6"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+	unsigned long tmp0, tmp1, tmp2;
+
+	__asm__ __volatile__ (
+		"# write_unlock					\n\t"
+		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
+		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
+		"mvfc	%2, psw;				\n\t"
+		"clrpsw	#0x40 -> nop;				\n\t"
+		DCACHE_CLEAR("%0", "r7", "%3")
+		"lock	%0, @%3;				\n\t"
+		"add	%0, %1;					\n\t"
+		"unlock	%0, @%3;				\n\t"
+		"mvtc	%2, psw;				\n\t"
+		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
+		: "r" (&rw->lock)
+		: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r7"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+	);
+}
+
+#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+
+static inline int _raw_write_trylock(rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
+#endif	/* _ASM_M32R_SPINLOCK_H */
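
The rwlock above uses the bias technique: the counter starts at RW_LOCK_BIAS (0x01000000); each reader subtracts 1 and a writer subtracts the entire bias, so the counter is exactly 0 only while a writer holds the lock and stays positive while it is free or held only by readers (read_can_lock() and write_can_lock() encode exactly these tests). The arithmetic in isolation, with no atomicity or contention handling, just to show the counting:

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

int main(void)
{
	int lock = RW_LOCK_BIAS;	/* unlocked */

	lock -= 1;			/* first reader enters  */
	lock -= 1;			/* second reader enters */
	printf("two readers: lock = 0x%08x (still positive)\n", lock);

	lock += 2;			/* both readers leave   */
	lock -= RW_LOCK_BIAS;		/* one writer enters    */
	printf("one writer:  lock = 0x%08x (exactly zero)\n", lock);
	return 0;
}
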
diff --git a/include/asm-m32r/stat.h b/include/asm-m32r/stat.h
new file mode 100644
index 0000000..05748fe
--- /dev/null
+++ b/include/asm-m32r/stat.h
@@ -0,0 +1,91 @@
+#ifndef _ASM_M32R_STAT_H
+#define _ASM_M32R_STAT_H
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+#include <asm/byteorder.h>
+
+struct __old_kernel_stat {
+	unsigned short st_dev;
+	unsigned short st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned long  st_size;
+	unsigned long  st_atime;
+	unsigned long  st_mtime;
+	unsigned long  st_ctime;
+};
+
+#define STAT_HAVE_NSEC	1
+
+struct stat {
+	unsigned short st_dev;
+	unsigned short __pad1;
+	unsigned long  st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned short __pad2;
+	unsigned long  st_size;
+	unsigned long  st_blksize;
+	unsigned long  st_blocks;
+	unsigned long  st_atime;
+	unsigned long  st_atime_nsec;
+	unsigned long  st_mtime;
+	unsigned long  st_mtime_nsec;
+	unsigned long  st_ctime;
+	unsigned long  st_ctime_nsec;
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+	unsigned long long	st_dev;
+	unsigned char	__pad0[4];
+#define STAT64_HAS_BROKEN_ST_INO
+	unsigned long	__st_ino;
+
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	unsigned long	st_uid;
+	unsigned long	st_gid;
+
+	unsigned long long	st_rdev;
+	unsigned char	__pad3[4];
+
+	long long	st_size;
+	unsigned long	st_blksize;
+
+#if defined(__BIG_ENDIAN)
+	unsigned long	__pad4;		/* future possible st_blocks high bits */
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+#elif defined(__LITTLE_ENDIAN)
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+	unsigned long	__pad4;		/* future possible st_blocks high bits */
+#else
+#error no endian defined
+#endif
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;
+
+	unsigned long long	st_ino;
+};
+
+#endif  /* _ASM_M32R_STAT_H */
diff --git a/include/asm-m32r/statfs.h b/include/asm-m32r/statfs.h
new file mode 100644
index 0000000..6eb4c60
--- /dev/null
+++ b/include/asm-m32r/statfs.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_M32R_STATFS_H
+#define _ASM_M32R_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif  /* _ASM_M32R_STATFS_H */
diff --git a/include/asm-m32r/string.h b/include/asm-m32r/string.h
new file mode 100644
index 0000000..cb54bcc
--- /dev/null
+++ b/include/asm-m32r/string.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_M32R_STRING_H
+#define _ASM_M32R_STRING_H
+
+/* $Id$ */
+
+#define  __HAVE_ARCH_STRLEN
+extern size_t strlen(const char * s);
+
+#define  __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define  __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#endif  /* _ASM_M32R_STRING_H */
diff --git a/include/asm-m32r/syscall.h b/include/asm-m32r/syscall.h
new file mode 100644
index 0000000..d8d4b2c
--- /dev/null
+++ b/include/asm-m32r/syscall.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_M32R_SYSCALL_H
+#define _ASM_M32R_SYSCALL_H
+
+/* $Id$ */
+
+/* Definitions for the system call vector.  */
+#define SYSCALL_VECTOR          "2"
+#define SYSCALL_VECTOR_ADDRESS  "0xa0"
+
+#endif /* _ASM_M32R_SYSCALL_H */
+
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
new file mode 100644
index 0000000..73348c3
--- /dev/null
+++ b/include/asm-m32r/system.h
@@ -0,0 +1,299 @@
+#ifndef _ASM_M32R_SYSTEM_H
+#define _ASM_M32R_SYSTEM_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001  by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'.
+ *
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
+ */
+
+#ifndef CONFIG_SMP
+#define prepare_to_switch()  do { } while(0)
+#endif	/* not CONFIG_SMP */
+
+#define switch_to(prev, next, last)  do { \
+	register unsigned long  arg0 __asm__ ("r0") = (unsigned long)prev; \
+	register unsigned long  arg1 __asm__ ("r1") = (unsigned long)next; \
+	register unsigned long  *oldsp __asm__ ("r2") = &(prev->thread.sp); \
+	register unsigned long  *newsp __asm__ ("r3") = &(next->thread.sp); \
+	register unsigned long  *oldlr __asm__ ("r4") = &(prev->thread.lr); \
+	register unsigned long  *newlr __asm__ ("r5") = &(next->thread.lr); \
+	register struct task_struct  *__last __asm__ ("r6"); \
+	__asm__ __volatile__ ( \
+		"st     r8, @-r15                                 \n\t" \
+		"st     r9, @-r15                                 \n\t" \
+		"st    r10, @-r15                                 \n\t" \
+		"st    r11, @-r15                                 \n\t" \
+		"st    r12, @-r15                                 \n\t" \
+		"st    r13, @-r15                                 \n\t" \
+		"st    r14, @-r15                                 \n\t" \
+		"seth  r14, #high(1f)                             \n\t" \
+		"or3   r14, r14, #low(1f)                         \n\t" \
+		"st    r14, @r4    ; store old LR                 \n\t" \
+		"st    r15, @r2    ; store old SP                 \n\t" \
+		"ld    r15, @r3    ; load new SP                  \n\t" \
+		"st     r0, @-r15  ; store 'prev' onto new stack  \n\t" \
+		"ld    r14, @r5    ; load new LR                  \n\t" \
+		"jmp   r14                                        \n\t" \
+		".fillinsn                                        \n  " \
+		"1:                                               \n\t" \
+		"ld     r6, @r15+  ; load 'prev' from new stack   \n\t" \
+		"ld    r14, @r15+                                 \n\t" \
+		"ld    r13, @r15+                                 \n\t" \
+		"ld    r12, @r15+                                 \n\t" \
+		"ld    r11, @r15+                                 \n\t" \
+		"ld    r10, @r15+                                 \n\t" \
+		"ld     r9, @r15+                                 \n\t" \
+		"ld     r8, @r15+                                 \n\t" \
+		: "=&r" (__last) \
+		: "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
+		  "r" (oldlr), "r" (newlr) \
+		: "memory" \
+	); \
+	last = __last; \
+} while(0)
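
switch_to() above saves the callee-saved registers and the old task's SP/LR, loads the new task's SP/LR, and jumps, so execution resumes wherever the new task last suspended. As a loose userspace analogy only (not the kernel mechanism), the POSIX ucontext API performs the same kind of save-and-restore of stack pointer and program counter:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, task_ctx;
static char task_stack[64 * 1024];

static void task_body(void)
{
	printf("running on the other context\n");
	swapcontext(&task_ctx, &main_ctx);	/* switch back to main */
}

int main(void)
{
	getcontext(&task_ctx);
	task_ctx.uc_stack.ss_sp = task_stack;
	task_ctx.uc_stack.ss_size = sizeof(task_stack);
	task_ctx.uc_link = &main_ctx;
	makecontext(&task_ctx, task_body, 0);

	swapcontext(&main_ctx, &task_ctx);	/* save our SP/PC, load the task's */
	printf("back on the original context\n");
	return 0;
}
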
+
+/* Interrupt Control */
+#if !defined(CONFIG_CHIP_M32102)
+#define local_irq_enable() \
+	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
+#define local_irq_disable() \
+	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
+#else	/* CONFIG_CHIP_M32102 */
+static inline void local_irq_enable(void)
+{
+	unsigned long tmpreg;
+	__asm__ __volatile__(
+		"mvfc	%0, psw;		\n\t"
+		"or3	%0, %0, #0x0040;	\n\t"
+		"mvtc	%0, psw;		\n\t"
+	: "=&r" (tmpreg) : : "cbit", "memory");
+}
+
+static inline void local_irq_disable(void)
+{
+	unsigned long tmpreg0, tmpreg1;
+	__asm__ __volatile__(
+		"ld24	%0, #0	; Use 32-bit insn. \n\t"
+		"mvfc	%1, psw	; No interrupt can be accepted here. \n\t"
+		"mvtc	%0, psw	\n\t"
+		"and3	%0, %1, #0xffbf	\n\t"
+		"mvtc	%0, psw	\n\t"
+	: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
+}
+#endif	/* CONFIG_CHIP_M32102 */
+
+#define local_save_flags(x) \
+	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
+
+#define local_irq_restore(x) \
+	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
+		: "r" (x) : "cbit", "memory")
+
+#if !defined(CONFIG_CHIP_M32102)
+#define local_irq_save(x)				\
+	__asm__ __volatile__(				\
+  		"mvfc	%0, psw;		\n\t"	\
+	  	"clrpsw	#0x40 -> nop;		\n\t"	\
+  		: "=r" (x) : /* no input */ : "memory")
+#else	/* CONFIG_CHIP_M32102 */
+#define local_irq_save(x) 				\
+	({						\
+		unsigned long tmpreg;			\
+		__asm__ __volatile__( 			\
+			"ld24	%1, #0 \n\t" 		\
+			"mvfc	%0, psw \n\t"		\
+			"mvtc	%1, psw \n\t"		\
+			"and3	%1, %0, #0xffbf \n\t"	\
+			"mvtc	%1, psw \n\t" 		\
+			: "=r" (x), "=&r" (tmpreg)	\
+			: : "cbit", "memory");		\
+	})
+#endif	/* CONFIG_CHIP_M32102 */
+
+#define irqs_disabled()					\
+	({						\
+		unsigned long flags;			\
+		local_save_flags(flags);		\
+		!(flags & 0x40);			\
+	})
+
+#endif  /* __KERNEL__ */
+
+#define nop()	__asm__ __volatile__ ("nop" : : )
+
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr)	(xchg((ptr),1))
+
+#ifdef CONFIG_SMP
+extern void  __xchg_called_with_bad_pointer(void);
+#endif
+
+#ifdef CONFIG_CHIP_M32700_TS1
+#define DCACHE_CLEAR(reg0, reg1, addr)				\
+	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
+	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
+	"lock	"reg0", @"reg1";			\n\t"	\
+	"add3	"reg0", "addr", #0x1000;		\n\t"	\
+	"ld	"reg0", @"reg0";			\n\t"	\
+	"add3	"reg0", "addr", #0x2000;		\n\t"	\
+	"ld	"reg0", @"reg0";			\n\t"	\
+	"unlock	"reg0", @"reg1";			\n\t"
+	/* FIXME: This workaround code cannot handle kernel modules
+	 * correctly in an SMP environment.
+	 */
+#else	/* CONFIG_CHIP_M32700_TS1 */
+#define DCACHE_CLEAR(reg0, reg1, addr)
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+
+static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
+	int size)
+{
+	unsigned long flags;
+	unsigned long tmp = 0;
+
+	local_irq_save(flags);
+
+	switch (size) {
+#ifndef CONFIG_SMP
+	case 1:
+		__asm__ __volatile__ (
+			"ldb	%0, @%2 \n\t"
+			"stb	%1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__ (
+			"ldh	%0, @%2 \n\t"
+			"sth	%1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__ (
+			"ld	%0, @%2 \n\t"
+			"st	%1, @%2 \n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+		break;
+#else  /* CONFIG_SMP */
+	case 4:
+		__asm__ __volatile__ (
+			DCACHE_CLEAR("%0", "r4", "%2")
+			"lock	%0, @%2;	\n\t"
+			"unlock	%1, @%2;	\n\t"
+			: "=&r" (tmp) : "r" (x), "r" (ptr)
+			: "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+			, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+		);
+		break;
+	default:
+		__xchg_called_with_bad_pointer();
+#endif  /* CONFIG_SMP */
+	}
+
+	local_irq_restore(flags);
+
+	return (tmp);
+}
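
xchg() above atomically stores a new value and returns the old one, either with interrupts disabled (UP) or via LOCK/UNLOCK (SMP). A common use is claiming a one-shot flag; a userspace sketch of that pattern, with a GCC __atomic builtin standing in for xchg():

#include <stdio.h>

static volatile long resource_claimed;		/* 0 = free, 1 = claimed */

static int claim_once(void)
{
	/* an old value of 0 means we are the one that claimed it */
	return __atomic_exchange_n(&resource_claimed, 1L, __ATOMIC_ACQ_REL) == 0;
}

int main(void)
{
	printf("first claim:  %s\n", claim_once() ? "won" : "lost");
	printf("second claim: %s\n", claim_once() ? "won" : "lost");
	return 0;
}
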
+
+/*
+ * Memory barrier.
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ */
+#define mb()   barrier()
+#define rmb()  mb()
+#define wmb()  mb()
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *      CPU 0                           CPU 1
+ *
+ *      b = 2;
+ *      memory_barrier();
+ *      p = &b;                         q = p;
+ *                                      read_barrier_depends();
+ *                                      d = *q;
+ * </programlisting>
+ *
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *      CPU 0                           CPU 1
+ *
+ *      a = 2;
+ *      memory_barrier();
+ *      b = 3;                          y = b;
+ *                                      read_barrier_depends();
+ *                                      x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this, where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#endif
+
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define arch_align_stack(x) (x)
+
+#endif  /* _ASM_M32R_SYSTEM_H */
diff --git a/include/asm-m32r/termbits.h b/include/asm-m32r/termbits.h
new file mode 100644
index 0000000..5ace370
--- /dev/null
+++ b/include/asm-m32r/termbits.h
@@ -0,0 +1,176 @@
+#ifndef _ASM_M32R_TERMBITS_H
+#define _ASM_M32R_TERMBITS_H
+
+/* $Id$ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IUCLC	0001000
+#define IXON	0002000
+#define IXANY	0004000
+#define IXOFF	0010000
+#define IMAXBEL	0020000
+#define IUTF8   0040000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define OLCUC	0000002
+#define ONLCR	0000004
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+#define OFILL	0000100
+#define OFDEL	0000200
+#define NLDLY	0000400
+#define   NL0	0000000
+#define   NL1	0000400
+#define CRDLY	0003000
+#define   CR0	0000000
+#define   CR1	0001000
+#define   CR2	0002000
+#define   CR3	0003000
+#define TABDLY	0014000
+#define   TAB0	0000000
+#define   TAB1	0004000
+#define   TAB2	0010000
+#define   TAB3	0014000
+#define   XTABS	0014000
+#define BSDLY	0020000
+#define   BS0	0000000
+#define   BS1	0020000
+#define VTDLY	0040000
+#define   VT0	0000000
+#define   VT1	0040000
+#define FFDLY	0100000
+#define   FF0	0000000
+#define   FF1	0100000
+
+/* c_cflag bit meaning */
+#define CBAUD	0010017
+#define  B0	0000000		/* hang up */
+#define  B50	0000001
+#define  B75	0000002
+#define  B110	0000003
+#define  B134	0000004
+#define  B150	0000005
+#define  B200	0000006
+#define  B300	0000007
+#define  B600	0000010
+#define  B1200	0000011
+#define  B1800	0000012
+#define  B2400	0000013
+#define  B4800	0000014
+#define  B9600	0000015
+#define  B19200	0000016
+#define  B38400	0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE	0000060
+#define   CS5	0000000
+#define   CS6	0000020
+#define   CS7	0000040
+#define   CS8	0000060
+#define CSTOPB	0000100
+#define CREAD	0000200
+#define PARENB	0000400
+#define PARODD	0001000
+#define HUPCL	0002000
+#define CLOCAL	0004000
+#define CBAUDEX 0010000
+#define    B57600 0010001
+#define   B115200 0010002
+#define   B230400 0010003
+#define   B460800 0010004
+#define   B500000 0010005
+#define   B576000 0010006
+#define   B921600 0010007
+#define  B1000000 0010010
+#define  B1152000 0010011
+#define  B1500000 0010012
+#define  B2000000 0010013
+#define  B2500000 0010014
+#define  B3000000 0010015
+#define  B3500000 0010016
+#define  B4000000 0010017
+#define CIBAUD	  002003600000	/* input baud rate (not used) */
+#define CTVB	  004000000000		/* VisioBraille Terminal flow control */
+#define CMSPAR	  010000000000		/* mark or space (stick) parity */
+#define CRTSCTS	  020000000000		/* flow control */
+
+/* c_lflag bits */
+#define ISIG	0000001
+#define ICANON	0000002
+#define XCASE	0000004
+#define ECHO	0000010
+#define ECHOE	0000020
+#define ECHOK	0000040
+#define ECHONL	0000100
+#define NOFLSH	0000200
+#define TOSTOP	0000400
+#define ECHOCTL	0001000
+#define ECHOPRT	0002000
+#define ECHOKE	0004000
+#define FLUSHO	0010000
+#define PENDIN	0040000
+#define IEXTEN	0100000
+
+/* tcflow() and TCXONC use these */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* tcflush() and TCFLSH use these */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* tcsetattr uses these */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
+
+#endif  /* _ASM_M32R_TERMBITS_H */
diff --git a/include/asm-m32r/termios.h b/include/asm-m32r/termios.h
new file mode 100644
index 0000000..fc99d2e
--- /dev/null
+++ b/include/asm-m32r/termios.h
@@ -0,0 +1,109 @@
+#ifndef _M32R_TERMIOS_H
+#define _M32R_TERMIOS_H
+
+/* orig : i386 2.6.0-test5 */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY		0
+#define N_SLIP		1
+#define N_MOUSE		2
+#define N_PPP		3
+#define N_STRIP		4
+#define N_AX25		5
+#define N_X25		6	/* X.25 async */
+#define N_6PACK		7
+#define N_MASC		8	/* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964		9	/* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL	10	/* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA		11	/* Linux IR - http://irda.sourceforge.net/ */
+#define N_SMSBLOCK	12	/* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC		13	/* synchronous HDLC */
+#define N_SYNC_PPP	14	/* synchronous PPP */
+#define N_HCI		15  /* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+
+/*	intr=^C		quit=^\		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+	unsigned short __tmp; \
+	get_user(__tmp,&(termio)->x); \
+	*(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+	put_user((termios)->c_iflag, &(termio)->c_iflag); \
+	put_user((termios)->c_oflag, &(termio)->c_oflag); \
+	put_user((termios)->c_cflag, &(termio)->c_cflag); \
+	put_user((termios)->c_lflag, &(termio)->c_lflag); \
+	put_user((termios)->c_line,  &(termio)->c_line); \
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _M32R_TERMIOS_H */
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
new file mode 100644
index 0000000..9f3a0fc
--- /dev/null
+++ b/include/asm-m32r/thread_info.h
@@ -0,0 +1,182 @@
+#ifndef _ASM_M32R_THREAD_INFO_H
+#define _ASM_M32R_THREAD_INFO_H
+
+/* thread_info.h: m32r low-level thread information
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	unsigned long		status;		/* thread-synchronous flags */
+	__u32			cpu;		/* current CPU */
+	__s32			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+	mm_segment_t		addr_limit;	/* thread address space:
+					 	   0-0xBFFFFFFF for user-thread
+						   0-0xFFFFFFFF for kernel-thread
+						*/
+	struct restart_block    restart_block;
+
+	__u8			supervisor_stack[0];
+};
+
+#else /* !__ASSEMBLY__ */
+
+/* offsets into the thread_info struct for assembly code access */
+#define TI_TASK		0x00000000
+#define TI_EXEC_DOMAIN	0x00000004
+#define TI_FLAGS	0x00000008
+#define TI_STATUS	0x0000000C
+#define TI_CPU		0x00000010
+#define TI_PRE_COUNT	0x00000014
+#define TI_ADDR_LIMIT	0x00000018
+#define TI_RESTART_BLOCK 0x0000001C
+
+#endif
+
+#define PREEMPT_ACTIVE		0x10000000
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.preempt_count	= 1,			\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+#define THREAD_SIZE (2*PAGE_SIZE)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+	struct thread_info *ti;
+
+	__asm__ __volatile__ (
+		"ldi	%0, #%1			\n\t"
+		"and	%0, sp			\n\t"
+		: "=r" (ti) : "i" (~(THREAD_SIZE - 1))
+	);
+
+	return ti;
+}
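
current_thread_info() works because the thread_info lives at the base of the THREAD_SIZE-aligned (8KB, two-page) kernel stack, so masking the stack pointer with ~(THREAD_SIZE - 1) rounds it down to that base. The same address arithmetic as a small sketch, with an ordinary aligned buffer standing in for the kernel stack and 4KB pages assumed:

#include <stdio.h>
#include <stdint.h>

#define TOY_THREAD_SIZE (2 * 4096)		/* two assumed 4KB pages */

static unsigned char stack[TOY_THREAD_SIZE]
	__attribute__((aligned(TOY_THREAD_SIZE)));

int main(void)
{
	/* pretend sp points somewhere near the top of the kernel stack */
	uintptr_t sp = (uintptr_t)&stack[TOY_THREAD_SIZE - 64];

	/* the same operation the inline asm performs: and with ~(size - 1) */
	uintptr_t base = sp & ~((uintptr_t)TOY_THREAD_SIZE - 1);

	printf("sp = %p, recovered base = %p, stack starts at %p\n",
	       (void *)sp, (void *)base, (void *)stack);
	return 0;
}
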
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define alloc_thread_info(tsk)					\
+	({							\
+		struct thread_info *ret;			\
+	 							\
+	 	ret = kmalloc(THREAD_SIZE, GFP_KERNEL);		\
+	 	if (ret)					\
+	 		memset(ret, 0, THREAD_SIZE);		\
+	 	ret;						\
+	 })
+#else
+#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#endif
+
+#define free_thread_info(info) kfree(info)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#define TI_FLAG_FAULT_CODE_SHIFT	28
+
+static inline void set_thread_fault_code(unsigned int val)
+{
+	struct thread_info *ti = current_thread_info();
+	ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
+		| (val << TI_FLAG_FAULT_CODE_SHIFT);
+}
+
+static inline unsigned int get_thread_fault_code(void)
+{
+	struct thread_info *ti = current_thread_info();
+	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
+}
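
set_thread_fault_code()/get_thread_fault_code() keep the fault code in the top bits of flags (bits 31..28, per TI_FLAG_FAULT_CODE_SHIFT), leaving the low bits free for the TIF_* work flags below. The intended packing in isolation, as a sketch:

#include <stdio.h>

#define TOY_FAULT_CODE_SHIFT 28

static unsigned long flags;

static void toy_set_fault_code(unsigned int val)
{
	/* clear bits 31..28, then store the 4-bit fault code there */
	flags = (flags & ((1UL << TOY_FAULT_CODE_SHIFT) - 1))
		| ((unsigned long)val << TOY_FAULT_CODE_SHIFT);
}

static unsigned int toy_get_fault_code(void)
{
	return (unsigned int)(flags >> TOY_FAULT_CODE_SHIFT);
}

int main(void)
{
	flags = 0x0000000eUL;			/* pretend some TIF_* bits are set */
	toy_set_fault_code(0x6);
	printf("flags = 0x%08lx, fault code = %u\n", flags, toy_get_fault_code());
	return 0;
}
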
+
+#else /* !__ASSEMBLY__ */
+
+#define THREAD_SIZE	8192
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
+#define TIF_IRET		5	/* return with iret */
+#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+					/* 31..28 fault code */
+#define TIF_MEMDIE		17
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_IRET		(1<<TIF_IRET)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+#define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_M32R_THREAD_INFO_H */
diff --git a/include/asm-m32r/timex.h b/include/asm-m32r/timex.h
new file mode 100644
index 0000000..abf12e7
--- /dev/null
+++ b/include/asm-m32r/timex.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_M32R_TIMEX_H
+#define _ASM_M32R_TIMEX_H
+
+/* $Id$ */
+
+/*
+ * linux/include/asm-m32r/timex.h
+ *
+ * m32r architecture timex specifications
+ */
+
+#include <linux/config.h>
+
+#define CLOCK_TICK_RATE	(CONFIG_BUS_CLOCK / CONFIG_TIMER_DIVIDE)
+#define CLOCK_TICK_FACTOR	20	/* Factor of both 1000000 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+	(1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+		<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+#ifdef __KERNEL__
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP.
+ */
+
+typedef unsigned long long cycles_t;
+
+static __inline__ cycles_t get_cycles (void)
+{
+	return 0;
+}
+#endif  /* __KERNEL__ */
+
+#endif  /* _ASM_M32R_TIMEX_H */
diff --git a/include/asm-m32r/tlb.h b/include/asm-m32r/tlb.h
new file mode 100644
index 0000000..c7ebd8d
--- /dev/null
+++ b/include/asm-m32r/tlb.h
@@ -0,0 +1,20 @@
+#ifndef _M32R_TLB_H
+#define _M32R_TLB_H
+
+/*
+ * m32r doesn't need any special per-pte or
+ * per-vma handling.
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _M32R_TLB_H */
diff --git a/include/asm-m32r/tlbflush.h b/include/asm-m32r/tlbflush.h
new file mode 100644
index 0000000..bc7c407
--- /dev/null
+++ b/include/asm-m32r/tlbflush.h
@@ -0,0 +1,102 @@
+#ifndef _ASM_M32R_TLBFLUSH_H
+#define _ASM_M32R_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ */
+
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *);
+extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
+	unsigned long);
+
+#ifndef CONFIG_SMP
+#ifdef CONFIG_MMU
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, start, end)	\
+	local_flush_tlb_range(vma, start, end)
+#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
+#else	/* CONFIG_MMU */
+#define flush_tlb_all()			do { } while (0)
+#define flush_tlb_mm(mm)		do { } while (0)
+#define flush_tlb_page(vma, vmaddr)	do { } while (0)
+#define flush_tlb_range(vma, start, end)	do { } while (0)
+#endif	/* CONFIG_MMU */
+#else	/* CONFIG_SMP */
+extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *);
+extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
+	unsigned long);
+
+#define flush_tlb_all()			smp_flush_tlb_all()
+#define flush_tlb_mm(mm)		smp_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, start, end)	\
+	smp_flush_tlb_range(vma, start, end)
+#define flush_tlb_kernel_range(start, end)	smp_flush_tlb_all()
+#endif	/* CONFIG_SMP */
+
+static __inline__ void __flush_tlb_page(unsigned long page)
+{
+	unsigned int tmpreg0, tmpreg1, tmpreg2;
+
+	__asm__ __volatile__ (
+		"seth	%0, #high(%4)	\n\t"
+		"st	%3, @(%5, %0)	\n\t"
+		"ldi	%1, #1		\n\t"
+		"st	%1, @(%6, %0)	\n\t"
+		"add3	%1, %0, %7	\n\t"
+		".fillinsn		\n"
+		"1:			\n\t"
+		"ld	%2, @(%6, %0)	\n\t"
+		"bnez	%2, 1b		\n\t"
+		"ld	%0, @%1+	\n\t"
+		"ld	%1, @%1		\n\t"
+		"st	%2, @+%0	\n\t"
+		"st	%2, @+%1	\n\t"
+		: "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
+		: "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
+		"i" (MTOP_offset), "i" (MIDXI_offset)
+		: "memory"
+	);
+}
+
+static __inline__ void __flush_tlb_all(void)
+{
+	unsigned int tmpreg0, tmpreg1;
+
+	__asm__ __volatile__ (
+		"seth	%0, #high(%2)		\n\t"
+		"or3	%0, %0, #low(%2)	\n\t"
+		"ldi	%1, #0xc		\n\t"
+		"st	%1, @%0			\n\t"
+		".fillinsn			\n"
+		"1:				\n\t"
+		"ld	%1, @%0			\n\t"
+		"bnez	%1, 1b			\n\t"
+		: "=&r" (tmpreg0), "=&r" (tmpreg1)
+		: "i" (MTOP) : "memory"
+	);
+}
+
+#define flush_tlb_pgtables(mm, start, end)	do { } while (0)
+
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif	/* _ASM_M32R_TLBFLUSH_H */
+
diff --git a/include/asm-m32r/topology.h b/include/asm-m32r/topology.h
new file mode 100644
index 0000000..299a89d
--- /dev/null
+++ b/include/asm-m32r/topology.h
@@ -0,0 +1,48 @@
+/*
+ * linux/include/asm-m32r/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_M32R_TOPOLOGY_H
+#define _ASM_M32R_TOPOLOGY_H
+
+/* Other architectures wishing to use this simple topology API should fill
+   in the below functions as appropriate in their own <asm/topology.h> file. */
+
+#define cpu_to_node(cpu)	(0)
+
+#ifndef parent_node
+#define parent_node(node)	(0)
+#endif
+#ifndef node_to_cpumask
+#define node_to_cpumask(node)	(cpu_online_map)
+#endif
+#ifndef node_to_first_cpu
+#define node_to_first_cpu(node)	(0)
+#endif
+#ifndef pcibus_to_cpumask
+#define pcibus_to_cpumask(bus)	(cpu_online_map)
+#endif
+
+#endif /* _ASM_M32R_TOPOLOGY_H */
diff --git a/include/asm-m32r/types.h b/include/asm-m32r/types.h
new file mode 100644
index 0000000..ca0a887
--- /dev/null
+++ b/include/asm-m32r/types.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_M32R_TYPES_H
+#define _ASM_M32R_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+/* $Id$ */
+
+/* orig : i386 2.4.18 */
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#ifndef __ASSEMBLY__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+/* DMA addresses are 32-bits wide.  */
+
+typedef u32 dma_addr_t;
+typedef u64 dma64_addr_t;
+
+typedef unsigned short kmem_bufctl_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif  /* _ASM_M32R_TYPES_H */
diff --git a/include/asm-m32r/uaccess.h b/include/asm-m32r/uaccess.h
new file mode 100644
index 0000000..bbb8ac4
--- /dev/null
+++ b/include/asm-m32r/uaccess.h
@@ -0,0 +1,753 @@
+#ifndef _ASM_M32R_UACCESS_H
+#define _ASM_M32R_UACCESS_H
+
+/*
+ *  linux/include/asm-m32r/uaccess.h
+ *
+ *  M32R version.
+ *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+#undef UACCESS_DEBUG
+
+#ifdef UACCESS_DEBUG
+#define UAPRINTK(args...) printk(args)
+#else
+#define UAPRINTK(args...)
+#endif /* UACCESS_DEBUG */
+
+/*
+ * User space memory access functions
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#ifdef CONFIG_MMU
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
+#else
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS		MAKE_MM_SEG(0xFFFFFFFF)
+#endif /* CONFIG_MMU */
+
+#define get_ds()	(KERNEL_DS)
+#ifdef CONFIG_MMU
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+#else
+static inline mm_segment_t get_fs(void)
+{
+  return USER_DS;
+}
+
+static inline void set_fs(mm_segment_t s)
+{
+}
+#endif /* CONFIG_MMU */
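+
+/*
+ * Usage sketch (illustrative only): a common pattern is to widen the
+ * limit temporarily so the user-access helpers accept kernel pointers:
+ *
+ *	mm_segment_t oldfs = get_fs();
+ *
+ *	set_fs(KERNEL_DS);
+ *	... call routines that take __user pointers on a kernel buffer ...
+ *	set_fs(oldfs);
+ */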
+
+#define segment_eq(a,b)	((a).seg == (b).seg)
+
+#define __addr_ok(addr) \
+	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+ * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
+ *
+ * This needs 33-bit arithmetic. We have a carry...
+ */
+#define __range_ok(addr,size) ({					\
+	unsigned long flag, sum; 					\
+	__chk_user_ptr(addr);						\
+	asm ( 								\
+		"	cmpu	%1, %1    ; clear cbit\n"		\
+		"	addx	%1, %3    ; set cbit if overflow\n"	\
+		"	subx	%0, %0\n"				\
+		"	cmpu	%4, %1\n"				\
+		"	subx	%0, %5\n"				\
+		: "=&r"(flag), "=r"(sum)				\
+		: "1"(addr), "r"((int)(size)), 				\
+		  "r"(current_thread_info()->addr_limit.seg), "r"(0)	\
+		: "cbit" );						\
+	flag; })
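+
+/*
+ * In plain C the same 33-bit test reads roughly as follows (illustrative
+ * sketch only; the asm above keeps the extra carry in the condition bit):
+ *
+ *	unsigned long long a   = (unsigned long)(addr);
+ *	unsigned long long lim = current_thread_info()->addr_limit.seg;
+ *
+ *	flag = (a + (unsigned long long)(size) >= lim);
+ *
+ * so "flag" is zero exactly when the whole range fits below the limit.
+ */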
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#ifdef CONFIG_MMU
+#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
+#else
+static inline int access_ok(int type, const void *addr, unsigned long size)
+{
+  extern unsigned long memory_start, memory_end;
+  unsigned long val = (unsigned long)addr;
+
+  return ((val >= memory_start) && ((val + size) < memory_end));
+}
+#endif /* CONFIG_MMU */
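+
+/*
+ * Usage sketch (illustrative only): a caller typically validates the
+ * whole user buffer once, then uses the unchecked __get_user()/
+ * __put_user()/__copy_*_user() helpers on it:
+ *
+ *	if (!access_ok(VERIFY_WRITE, buf, count))
+ *		return -EFAULT;
+ */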
+
+/**
+ * verify_area: - Obsolete/deprecated and will go away soon,
+ * use access_ok() instead.
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This function has been replaced by access_ok().
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns zero if the memory block may be valid, -EFAULT
+ * if it is definitely invalid.
+ *
+ * See access_ok() for more details.
+ */
+static inline int __deprecated verify_area(int type, const void __user *addr,
+			      unsigned long size)
+{
+	return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
+
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
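+
+/*
+ * Conceptually (illustrative sketch only; field and helper names may
+ * differ), fixup_exception() looks the faulting PC up in __ex_table and
+ * redirects execution to the recorded fixup address:
+ *
+ *	fixup = search_exception_tables(regs->bpc);
+ *	if (fixup) {
+ *		regs->bpc = fixup->fixup;
+ *		return 1;
+ *	}
+ *	return 0;
+ */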
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern void __get_user_1(void);
+extern void __get_user_2(void);
+extern void __get_user_4(void);
+
+#ifndef MODULE
+#define __get_user_x(size,ret,x,ptr) 					\
+	__asm__ __volatile__(						\
+		"	mv	r0, %0\n"				\
+		"	mv	r1, %1\n" 				\
+		"	bl __get_user_" #size "\n"			\
+		"	mv	%0, r0\n"				\
+		"	mv	%1, r1\n" 				\
+		: "=r"(ret), "=r"(x) 					\
+		: "0"(ptr)						\
+		: "r0", "r1", "r14" )
+#else /* MODULE */
+/*
+ * Use "jl" instead of "bl" for MODULE
+ */
+#define __get_user_x(size,ret,x,ptr) 					\
+	__asm__ __volatile__(						\
+		"	mv	r0, %0\n"				\
+		"	mv	r1, %1\n" 				\
+		"	seth	lr, #high(__get_user_" #size ")\n"	\
+		"	or3	lr, lr, #low(__get_user_" #size ")\n"	\
+		"	jl 	lr\n"					\
+		"	mv	%0, r0\n"				\
+		"	mv	%1, r1\n" 				\
+		: "=r"(ret), "=r"(x) 					\
+		: "0"(ptr)						\
+		: "r0", "r1", "r14" )
+#endif
+
+/* Careful: we have to cast the result to the type of the pointer for sign
+   reasons */
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x,ptr)							\
+({	int __ret_gu,__val_gu;						\
+	__chk_user_ptr(ptr);						\
+	switch(sizeof (*(ptr))) {					\
+	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
+	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
+	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
+	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
+	}								\
+	(x) = (__typeof__(*(ptr)))__val_gu;				\
+	__ret_gu;							\
+})
+
+extern void __put_user_bad(void);
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x,ptr)							\
+  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
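+
+/*
+ * Usage sketch (illustrative only; "arg" stands for a user-supplied
+ * pointer, e.g. an ioctl argument):
+ *
+ *	int val;
+ *
+ *	if (get_user(val, (int __user *)arg))
+ *		return -EFAULT;
+ *	if (put_user(val + 1, (int __user *)arg))
+ *		return -EFAULT;
+ */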
+
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x,ptr) \
+  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x,ptr) \
+  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
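+/*
+ * Usage sketch (illustrative only): when several accesses hit the same
+ * user area, check it once and use the unchecked variants afterwards:
+ *
+ *	int err;
+ *
+ *	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(int)))
+ *		return -EFAULT;
+ *	err  = __get_user(a, &uptr[0]);
+ *	err |= __put_user(a, &uptr[1]);
+ */
+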
+#define __put_user_nocheck(x,ptr,size)					\
+({									\
+	long __pu_err;							\
+	__put_user_size((x),(ptr),(size),__pu_err);			\
+	__pu_err;							\
+})
+
+
+#define __put_user_check(x,ptr,size)					\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
+	might_sleep();							\
+	if (access_ok(VERIFY_WRITE,__pu_addr,size))			\
+		__put_user_size((x),__pu_addr,(size),__pu_err);		\
+	__pu_err;							\
+})
+
+#if defined(__LITTLE_ENDIAN__)
+#define __put_user_u64(x, addr, err)                                    \
+        __asm__ __volatile__(                                           \
+                "       .fillinsn\n"                                    \
+                "1:     st %L1,@%2\n"                                    \
+                "       .fillinsn\n"                                    \
+                "2:     st %H1,@(4,%2)\n"                                \
+                "       .fillinsn\n"                                    \
+                "3:\n"                                                  \
+                ".section .fixup,\"ax\"\n"                              \
+                "       .balign 4\n"                                    \
+                "4:     ldi %0,%3\n"                                    \
+                "       seth r14,#high(3b)\n"                           \
+                "       or3 r14,r14,#low(3b)\n"                         \
+                "       jmp r14\n"                                      \
+                ".previous\n"                                           \
+                ".section __ex_table,\"a\"\n"                           \
+                "       .balign 4\n"                                    \
+                "       .long 1b,4b\n"                                  \
+                "       .long 2b,4b\n"                                  \
+                ".previous"                                             \
+                : "=r"(err)                                             \
+                : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err)		\
+                : "r14", "memory")
+
+#elif defined(__BIG_ENDIAN__)
+#define __put_user_u64(x, addr, err)					\
+	__asm__ __volatile__(						\
+		"	.fillinsn\n"					\
+		"1:	st %H1,@%2\n"					\
+		"	.fillinsn\n"					\
+		"2:	st %L1,@(4,%2)\n"				\
+		"	.fillinsn\n"					\
+		"3:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"	.balign 4\n"					\
+		"4:	ldi %0,%3\n"					\
+		"	seth r14,#high(3b)\n"				\
+		"	or3 r14,r14,#low(3b)\n"				\
+		"	jmp r14\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.balign 4\n"					\
+		"	.long 1b,4b\n"					\
+		"	.long 2b,4b\n"					\
+		".previous"						\
+		: "=r"(err)						\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err)		\
+		: "r14", "memory")
+#else
+#error no endian defined
+#endif
+
+#define __put_user_size(x,ptr,size,retval)				\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	  case 1: __put_user_asm(x,ptr,retval,"b"); break;		\
+	  case 2: __put_user_asm(x,ptr,retval,"h"); break;		\
+	  case 4: __put_user_asm(x,ptr,retval,""); break;		\
+	  case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
+	  default: __put_user_bad();					\
+	}								\
+} while (0)
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype)				\
+	__asm__ __volatile__(						\
+		"	.fillinsn\n"					\
+		"1:	st"itype" %1,@%2\n"				\
+		"	.fillinsn\n"					\
+		"2:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"	.balign 4\n"					\
+		"3:	ldi %0,%3\n"					\
+		"	seth r14,#high(2b)\n"				\
+		"	or3 r14,r14,#low(2b)\n"				\
+		"	jmp r14\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.balign 4\n"					\
+		"	.long 1b,3b\n"					\
+		".previous"						\
+		: "=r"(err)						\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err)		\
+		: "r14", "memory")
+
+#define __get_user_nocheck(x,ptr,size)					\
+({									\
+	long __gu_err, __gu_val;					\
+	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
+	__gu_err;							\
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x,ptr,size,retval)				\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	  case 1: __get_user_asm(x,ptr,retval,"ub"); break;		\
+	  case 2: __get_user_asm(x,ptr,retval,"uh"); break;		\
+	  case 4: __get_user_asm(x,ptr,retval,""); break;		\
+	  default: (x) = __get_user_bad();				\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype)				\
+	__asm__ __volatile__(						\
+		"	.fillinsn\n"					\
+		"1:	ld"itype" %1,@%2\n"				\
+		"	.fillinsn\n"					\
+		"2:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"	.balign 4\n"					\
+		"3:	ldi %0,%3\n"					\
+		"	seth r14,#high(2b)\n"				\
+		"	or3 r14,r14,#low(2b)\n"				\
+		"	jmp r14\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.balign 4\n"					\
+		"	.long 1b,3b\n"					\
+		".previous"						\
+		: "=r"(err), "=&r"(x)					\
+		: "r"(addr), "i"(-EFAULT), "0"(err)			\
+		: "r14", "memory")
+
+/*
+ * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
+ * we return the initial request size (1, 2 or 4), as copy_*_user should do.
+ * If a store crosses a page boundary and gets a fault, the m32r will not write
+ * anything, so this is accurate.
+ */
+
+
+/*
+ * Copy To/From Userspace
+ */
+
+/* Generic arbitrary sized copy.  */
+/* Return the number of bytes NOT copied.  */
+#define __copy_user(to,from,size)					\
+do {									\
+	unsigned long __dst, __src, __c;				\
+	__asm__ __volatile__ (						\
+		"	mv	r14, %0\n"				\
+		"	or	r14, %1\n"				\
+		"	beq	%0, %1, 9f\n"				\
+		"	beqz	%2, 9f\n"				\
+		"	and3	r14, r14, #3\n"				\
+		"	bnez	r14, 2f\n"				\
+		"	and3	%2, %2, #3\n"				\
+		"	beqz	%3, 2f\n"				\
+		"	addi	%0, #-4		; word_copy \n"		\
+		"	.fillinsn\n"					\
+		"0:	ld	r14, @%1+\n"				\
+		"	addi	%3, #-1\n"				\
+		"	.fillinsn\n"					\
+		"1:	st	r14, @+%0\n"				\
+		"	bnez	%3, 0b\n"				\
+		"	beqz	%2, 9f\n"				\
+		"	addi	%0, #4\n"				\
+		"	.fillinsn\n"					\
+		"2:	ldb	r14, @%1	; byte_copy \n"		\
+		"	.fillinsn\n"					\
+		"3:	stb	r14, @%0\n"				\
+		"	addi	%1, #1\n"				\
+		"	addi	%2, #-1\n"				\
+		"	addi	%0, #1\n"				\
+		"	bnez	%2, 2b\n"				\
+		"	.fillinsn\n"					\
+		"9:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"	.balign 4\n"					\
+		"5:	addi	%3, #1\n"				\
+		"	addi	%1, #-4\n"				\
+		"	.fillinsn\n"					\
+		"6:	slli	%3, #2\n"				\
+		"	add	%2, %3\n"				\
+		"	addi	%0, #4\n"				\
+		"	.fillinsn\n"					\
+		"7:	seth	r14, #high(9b)\n"			\
+		"	or3	r14, r14, #low(9b)\n"			\
+		"	jmp	r14\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.balign 4\n"					\
+		"	.long 0b,6b\n"					\
+		"	.long 1b,5b\n"					\
+		"	.long 2b,9b\n"					\
+		"	.long 3b,9b\n"					\
+		".previous\n"						\
+		: "=&r"(__dst), "=&r"(__src), "=&r"(size), "=&r"(__c)	\
+		: "0"(to), "1"(from), "2"(size), "3"(size / 4)		\
+		: "r14", "memory");					\
+} while (0)
+
+#define __copy_user_zeroing(to,from,size)				\
+do {									\
+	unsigned long __dst, __src, __c;				\
+	__asm__ __volatile__ (						\
+		"	mv	r14, %0\n"				\
+		"	or	r14, %1\n"				\
+		"	beq	%0, %1, 9f\n"				\
+		"	beqz	%2, 9f\n"				\
+		"	and3	r14, r14, #3\n"				\
+		"	bnez	r14, 2f\n"				\
+		"	and3	%2, %2, #3\n"				\
+		"	beqz	%3, 2f\n"				\
+		"	addi	%0, #-4		; word_copy \n"		\
+		"	.fillinsn\n"					\
+		"0:	ld	r14, @%1+\n"				\
+		"	addi	%3, #-1\n"				\
+		"	.fillinsn\n"					\
+		"1:	st	r14, @+%0\n"				\
+		"	bnez	%3, 0b\n"				\
+		"	beqz	%2, 9f\n"				\
+		"	addi	%0, #4\n"				\
+		"	.fillinsn\n"					\
+		"2:	ldb	r14, @%1	; byte_copy \n"		\
+		"	.fillinsn\n"					\
+		"3:	stb	r14, @%0\n"				\
+		"	addi	%1, #1\n"				\
+		"	addi	%2, #-1\n"				\
+		"	addi	%0, #1\n"				\
+		"	bnez	%2, 2b\n"				\
+		"	.fillinsn\n"					\
+		"9:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"	.balign 4\n"					\
+		"5:	addi	%3, #1\n"				\
+		"	addi	%1, #-4\n"				\
+		"	.fillinsn\n"					\
+		"6:	slli	%3, #2\n"				\
+		"	add	%2, %3\n"				\
+		"	addi	%0, #4\n"				\
+		"	.fillinsn\n"					\
+		"7:	ldi	r14, #0		; store zero \n"	\
+		"	.fillinsn\n"					\
+		"8:	addi	%2, #-1\n"				\
+		"	stb	r14, @%0	; ACE? \n"		\
+		"	addi	%0, #1\n"				\
+		"	bnez	%2, 8b\n"				\
+		"	seth	r14, #high(9b)\n"			\
+		"	or3	r14, r14, #low(9b)\n"			\
+		"	jmp	r14\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.balign 4\n"					\
+		"	.long 0b,6b\n"					\
+		"	.long 1b,5b\n"					\
+		"	.long 2b,7b\n"					\
+		"	.long 3b,7b\n"					\
+		".previous\n"						\
+		: "=&r"(__dst), "=&r"(__src), "=&r"(size), "=&r"(__c)	\
+		: "0"(to), "1"(from), "2"(size), "3"(size / 4)		\
+		: "r14", "memory");					\
+} while (0)
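+
+/*
+ * A rough C-level picture of the loops above (illustrative sketch only;
+ * the real macros recover from faults through the .fixup/__ex_table
+ * entries): whole words are copied while source, destination and size
+ * are all 4-byte aligned, then the remainder goes byte by byte:
+ *
+ *	while (nwords--) {
+ *		*(u32 *)to = *(u32 *)from;
+ *		to += 4; from += 4; size -= 4;
+ *	}
+ *	while (size) {
+ *		*(u8 *)to++ = *(u8 *)from++;
+ *		size--;
+ *	}
+ *
+ * On exit "size" holds the number of bytes NOT copied (0 on success);
+ * __copy_user_zeroing() additionally zero-fills the uncopied tail of
+ * the destination when a read from user space faults.
+ */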
+
+
+/* We let the __ versions of copy_from/to_user inline, because they're often
+ * used in fast paths and have only a small space overhead.
+ */
+static inline unsigned long __generic_copy_from_user_nocheck(void *to,
+	const void __user *from, unsigned long n)
+{
+	__copy_user_zeroing(to,from,n);
+	return n;
+}
+
+static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
+	const void *from, unsigned long n)
+{
+	__copy_user(to,from,n);
+	return n;
+}
+
+unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
+unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+#define __copy_to_user(to,from,n)			\
+	__generic_copy_to_user_nocheck((to),(from),(n))
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+#define copy_to_user(to,from,n)				\
+({							\
+	might_sleep();					\
+	__generic_copy_to_user((to),(from),(n));	\
+})
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+#define __copy_from_user(to,from,n)			\
+	__generic_copy_from_user_nocheck((to),(from),(n))
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+#define copy_from_user(to,from,n)			\
+({							\
+	might_sleep();					\
+	__generic_copy_from_user((to),(from),(n));	\
+})
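+
+/*
+ * Usage sketch (illustrative only; "kbuf" is a hypothetical driver
+ * buffer): a read handler pushes data out with copy_to_user(), a write
+ * handler pulls it in with copy_from_user():
+ *
+ *	if (copy_to_user(ubuf, kbuf, count))
+ *		return -EFAULT;
+ *
+ *	if (copy_from_user(kbuf, ubuf, count))
+ *		return -EFAULT;
+ */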
+
+long __must_check strncpy_from_user(char *dst, const char __user *src,
+				long count);
+long __must_check __strncpy_from_user(char *dst,
+				const char __user *src, long count);
+
+/**
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @n:    Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+unsigned long __clear_user(void __user *mem, unsigned long len);
+
+/**
+ * clear_user: - Zero a block of memory in user space.
+ * @to:   Destination address, in user space.
+ * @n:    Number of bytes to zero.
+ *
+ * Zero a block of memory in user space.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+unsigned long clear_user(void __user *mem, unsigned long len);
+
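+/*
+ * Usage sketch (illustrative only): zero the tail of a user buffer after
+ * a short copy:
+ *
+ *	if (clear_user(ubuf + copied, count - copied))
+ *		return -EFAULT;
+ */
+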
+/**
+ * strlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+long strnlen_user(const char __user *str, long n);
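+
+/*
+ * Usage sketch (illustrative only; "name" is a hypothetical kernel
+ * buffer): bounded fetch of a NUL-terminated user string:
+ *
+ *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
+ *
+ *	if (len < 0)
+ *		return -EFAULT;
+ *	name[len] = '\0';
+ */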
+
+#endif /* _ASM_M32R_UACCESS_H */
diff --git a/include/asm-m32r/ucontext.h b/include/asm-m32r/ucontext.h
new file mode 100644
index 0000000..2de709a
--- /dev/null
+++ b/include/asm-m32r/ucontext.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_M32R_UCONTEXT_H
+#define _ASM_M32R_UCONTEXT_H
+
+/* orig : i386 2.4.18 */
+
+struct ucontext {
+	unsigned long	  uc_flags;
+	struct ucontext  *uc_link;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* _ASM_M32R_UCONTEXT_H */
diff --git a/include/asm-m32r/unaligned.h b/include/asm-m32r/unaligned.h
new file mode 100644
index 0000000..3aef9ac
--- /dev/null
+++ b/include/asm-m32r/unaligned.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_M32R_UNALIGNED_H
+#define _ASM_M32R_UNALIGNED_H
+
+/* $Id$ */
+
+/* orig : generic 2.4.18 */
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents.
+ */
+
+#include <asm/string.h>
+
+
+#define get_unaligned(ptr) \
+  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
+
+#define put_unaligned(val, ptr)				\
+  ({ __typeof__(*(ptr)) __tmp = (val);			\
+     memmove((ptr), &__tmp, sizeof(*(ptr)));		\
+     (void)0; })
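+
+/*
+ * Usage sketch (illustrative only): reading and updating a 32-bit field
+ * at an arbitrary offset in a byte buffer:
+ *
+ *	__u32 seq = get_unaligned((__u32 *)(buf + 2));
+ *
+ *	put_unaligned(seq + 1, (__u32 *)(buf + 2));
+ */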
+
+
+#endif  /* _ASM_M32R_UNALIGNED_H */
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
new file mode 100644
index 0000000..8552d8f
--- /dev/null
+++ b/include/asm-m32r/unistd.h
@@ -0,0 +1,474 @@
+#ifndef _ASM_M32R_UNISTD_H
+#define _ASM_M32R_UNISTD_H
+
+/* $Id$ */
+
+#include <asm/syscall.h>	/* SYSCALL_* */
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall	  0
+#define __NR_exit		  1
+#define __NR_fork		  2
+#define __NR_read		  3
+#define __NR_write		  4
+#define __NR_open		  5
+#define __NR_close		  6
+#define __NR_waitpid		  7
+#define __NR_creat		  8
+#define __NR_link		  9
+#define __NR_unlink		 10
+#define __NR_execve		 11
+#define __NR_chdir		 12
+#define __NR_time		 13
+#define __NR_mknod		 14
+#define __NR_chmod		 15
+/* 16 is unused */
+/* 17 is unused */
+/* 18 is unused */
+#define __NR_lseek		 19
+#define __NR_getpid		 20
+#define __NR_mount		 21
+#define __NR_umount		 22
+/* 23 is unused */
+/* 24 is unused */
+#define __NR_stime		 25
+#define __NR_ptrace		 26
+#define __NR_alarm		 27
+/* 28 is unused */
+#define __NR_pause		 29
+#define __NR_utime		 30
+/* 31 is unused */
+#define __NR_cachectl		 32 /* old #define __NR_gtty		 32*/
+#define __NR_access		 33
+/* 34 is unused */
+/* 35 is unused */
+#define __NR_sync		 36
+#define __NR_kill		 37
+#define __NR_rename		 38
+#define __NR_mkdir		 39
+#define __NR_rmdir		 40
+#define __NR_dup		 41
+#define __NR_pipe		 42
+#define __NR_times		 43
+/* 44 is unused */
+#define __NR_brk		 45
+/* 46 is unused */
+/* 47 is unused (getgid16) */
+/* 48 is unused */
+/* 49 is unused */
+/* 50 is unused */
+#define __NR_acct		 51
+#define __NR_umount2		 52
+/* 53 is unused */
+#define __NR_ioctl		 54
+/* 55 is unused (fcntl) */
+/* 56 is unused */
+#define __NR_setpgid		 57
+/* 58 is unused */
+/* 59 is unused */
+#define __NR_umask		 60
+#define __NR_chroot		 61
+#define __NR_ustat		 62
+#define __NR_dup2		 63
+#define __NR_getppid		 64
+#define __NR_getpgrp		 65
+#define __NR_setsid		 66
+/* 67 is unused */
+/* 68 is unused */
+/* 69 is unused */
+/* 70 is unused */
+/* 71 is unused */
+/* 72 is unused */
+/* 73 is unused */
+#define __NR_sethostname	 74
+#define __NR_setrlimit		 75
+/* 76 is unused (old getrlimit) */
+#define __NR_getrusage		 77
+#define __NR_gettimeofday	 78
+#define __NR_settimeofday	 79
+/* 80 is unused */
+/* 81 is unused */
+/* 82 is unused */
+#define __NR_symlink		 83
+/* 84 is unused */
+#define __NR_readlink		 85
+#define __NR_uselib		 86
+#define __NR_swapon		 87
+#define __NR_reboot		 88
+/* 89 is unused */
+/* 90 is unused */
+#define __NR_munmap		 91
+#define __NR_truncate		 92
+#define __NR_ftruncate		 93
+#define __NR_fchmod		 94
+/* 95 is unused */
+#define __NR_getpriority	 96
+#define __NR_setpriority	 97
+/* 98 is unused */
+#define __NR_statfs		 99
+#define __NR_fstatfs		100
+/* 101 is unused */
+#define __NR_socketcall		102
+#define __NR_syslog		103
+#define __NR_setitimer		104
+#define __NR_getitimer		105
+#define __NR_stat		106
+#define __NR_lstat		107
+#define __NR_fstat		108
+/* 109 is unused */
+/* 110 is unused */
+#define __NR_vhangup		111
+/* 112 is unused */
+/* 113 is unused */
+#define __NR_wait4		114
+#define __NR_swapoff		115
+#define __NR_sysinfo		116
+#define __NR_ipc		117
+#define __NR_fsync		118
+/* 119 is unused */
+#define __NR_clone		120
+#define __NR_setdomainname	121
+#define __NR_uname		122
+/* 123 is unused */
+#define __NR_adjtimex		124
+#define __NR_mprotect		125
+/* 126 is unused */
+/* 127 is unused */
+#define __NR_init_module	128
+#define __NR_delete_module	129
+/* 130 is unused */
+#define __NR_quotactl		131
+#define __NR_getpgid		132
+#define __NR_fchdir		133
+#define __NR_bdflush		134
+#define __NR_sysfs		135
+#define __NR_personality	136
+/* 137 is unused */
+/* 138 is unused */
+/* 139 is unused */
+#define __NR__llseek		140
+#define __NR_getdents		141
+#define __NR__newselect		142
+#define __NR_flock		143
+#define __NR_msync		144
+#define __NR_readv		145
+#define __NR_writev		146
+#define __NR_getsid		147
+#define __NR_fdatasync		148
+#define __NR__sysctl		149
+#define __NR_mlock		150
+#define __NR_munlock		151
+#define __NR_mlockall		152
+#define __NR_munlockall		153
+#define __NR_sched_setparam		154
+#define __NR_sched_getparam		155
+#define __NR_sched_setscheduler		156
+#define __NR_sched_getscheduler		157
+#define __NR_sched_yield		158
+#define __NR_sched_get_priority_max	159
+#define __NR_sched_get_priority_min	160
+#define __NR_sched_rr_get_interval	161
+#define __NR_nanosleep		162
+#define __NR_mremap		163
+/* 164 is unused */
+/* 165 is unused */
+#define __NR_tas		166
+/* 167 is unused */
+#define __NR_poll		168
+#define __NR_nfsservctl		169
+/* 170 is unused */
+/* 171 is unused */
+#define __NR_prctl              172
+#define __NR_rt_sigreturn	173
+#define __NR_rt_sigaction	174
+#define __NR_rt_sigprocmask	175
+#define __NR_rt_sigpending	176
+#define __NR_rt_sigtimedwait	177
+#define __NR_rt_sigqueueinfo	178
+#define __NR_rt_sigsuspend	179
+#define __NR_pread64		180
+#define __NR_pwrite64		181
+/* 182 is unused */
+#define __NR_getcwd		183
+#define __NR_capget		184
+#define __NR_capset		185
+#define __NR_sigaltstack	186
+#define __NR_sendfile		187
+/* 188 is unused */
+/* 189 is unused */
+#define __NR_vfork		190
+#define __NR_ugetrlimit		191	/* SuS compliant getrlimit */
+#define __NR_mmap2		192
+#define __NR_truncate64		193
+#define __NR_ftruncate64	194
+#define __NR_stat64		195
+#define __NR_lstat64		196
+#define __NR_fstat64		197
+#define __NR_lchown32		198
+#define __NR_getuid32		199
+#define __NR_getgid32		200
+#define __NR_geteuid32		201
+#define __NR_getegid32		202
+#define __NR_setreuid32		203
+#define __NR_setregid32		204
+#define __NR_getgroups32	205
+#define __NR_setgroups32	206
+#define __NR_fchown32		207
+#define __NR_setresuid32	208
+#define __NR_getresuid32	209
+#define __NR_setresgid32	210
+#define __NR_getresgid32	211
+#define __NR_chown32		212
+#define __NR_setuid32		213
+#define __NR_setgid32		214
+#define __NR_setfsuid32		215
+#define __NR_setfsgid32		216
+#define __NR_pivot_root		217
+#define __NR_mincore		218
+#define __NR_madvise		219
+#define __NR_getdents64		220
+#define __NR_fcntl64		221
+/* 222 is unused */
+/* 223 is unused */
+#define __NR_gettid		224
+#define __NR_readahead		225
+#define __NR_setxattr		226
+#define __NR_lsetxattr		227
+#define __NR_fsetxattr		228
+#define __NR_getxattr		229
+#define __NR_lgetxattr		230
+#define __NR_fgetxattr		231
+#define __NR_listxattr		232
+#define __NR_llistxattr		233
+#define __NR_flistxattr		234
+#define __NR_removexattr	235
+#define __NR_lremovexattr	236
+#define __NR_fremovexattr	237
+#define __NR_tkill		238
+#define __NR_sendfile64		239
+#define __NR_futex		240
+#define __NR_sched_setaffinity	241
+#define __NR_sched_getaffinity	242
+#define __NR_set_thread_area	243
+#define __NR_get_thread_area	244
+#define __NR_io_setup		245
+#define __NR_io_destroy		246
+#define __NR_io_getevents	247
+#define __NR_io_submit		248
+#define __NR_io_cancel		249
+#define __NR_fadvise64		250
+/* 251 is unused */
+#define __NR_exit_group		252
+#define __NR_lookup_dcookie	253
+#define __NR_epoll_create	254
+#define __NR_epoll_ctl		255
+#define __NR_epoll_wait		256
+#define __NR_remap_file_pages	257
+#define __NR_set_tid_address	258
+#define __NR_timer_create	259
+#define __NR_timer_settime	(__NR_timer_create+1)
+#define __NR_timer_gettime	(__NR_timer_create+2)
+#define __NR_timer_getoverrun	(__NR_timer_create+3)
+#define __NR_timer_delete	(__NR_timer_create+4)
+#define __NR_clock_settime	(__NR_timer_create+5)
+#define __NR_clock_gettime	(__NR_timer_create+6)
+#define __NR_clock_getres	(__NR_timer_create+7)
+#define __NR_clock_nanosleep	(__NR_timer_create+8)
+#define __NR_statfs64		268
+#define __NR_fstatfs64		269
+#define __NR_tgkill		270
+#define __NR_utimes		271
+#define __NR_fadvise64_64	272
+#define __NR_vserver		273
+#define __NR_mbind		274
+#define __NR_get_mempolicy	275
+#define __NR_set_mempolicy	276
+#define __NR_mq_open		277
+#define __NR_mq_unlink		(__NR_mq_open+1)
+#define __NR_mq_timedsend	(__NR_mq_open+2)
+#define __NR_mq_timedreceive	(__NR_mq_open+3)
+#define __NR_mq_notify		(__NR_mq_open+4)
+#define __NR_mq_getsetattr	(__NR_mq_open+5)
+#define __NR_sys_kexec_load	283
+#define __NR_waitid		284
+
+#define NR_syscalls 285
+
+/* User-visible error numbers are in the range -1 to -124: see
+ * <asm-m32r/errno.h>
+ */
+
+#define __syscall_return(type, res) \
+do { \
+	if ((unsigned long)(res) >= (unsigned long)(-(124 + 1))) { \
+	/* Avoid using "res" which is declared to be in register r0; \
+	   errno might expand to a function call and clobber it.  */ \
+		int __err = -(res); \
+		errno = __err; \
+		res = -1; \
+	} \
+	return (type) (res); \
+} while (0)
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __res __asm__("r0"); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __res __asm__ ("r0") = (long)(arg1); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno), "0" (__res) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __arg2 __asm__ ("r1") = (long)(arg2); \
+register long __res __asm__ ("r0") = (long)(arg1); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno), "0" (__res), "r" (__arg2) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __arg3 __asm__ ("r2") = (long)(arg3); \
+register long __arg2 __asm__ ("r1") = (long)(arg2); \
+register long __res __asm__ ("r0") = (long)(arg1); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno), "0" (__res), "r" (__arg2), \
+		"r" (__arg3) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __arg4 __asm__ ("r3") = (long)(arg4); \
+register long __arg3 __asm__ ("r2") = (long)(arg3); \
+register long __arg2 __asm__ ("r1") = (long)(arg2); \
+register long __res __asm__ ("r0") = (long)(arg1); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno), "0" (__res), "r" (__arg2), \
+		"r" (__arg3), "r" (__arg4) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+	type5,arg5) \
+type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+register long __scno __asm__ ("r7") = __NR_##name; \
+register long __arg5 __asm__ ("r4") = (long)(arg5); \
+register long __arg4 __asm__ ("r3") = (long)(arg4); \
+register long __arg3 __asm__ ("r2") = (long)(arg3); \
+register long __arg2 __asm__ ("r1") = (long)(arg2); \
+register long __res __asm__ ("r0") = (long)(arg1); \
+__asm__ __volatile__ (\
+	"trap #" SYSCALL_VECTOR \
+	: "=r" (__res) \
+	: "r" (__scno), "0" (__res), "r" (__arg2), \
+		"r" (__arg3), "r" (__arg4), "r" (__arg5) \
+	: "memory"); \
+__syscall_return(type,__res); \
+}
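+
+/*
+ * Usage sketch (illustrative only): user-level code can instantiate a
+ * system call stub from these macros, e.g.
+ *
+ *	_syscall1(int, close, int, fd)
+ *
+ * defines close() so that it loads __NR_close into r7 and the argument
+ * into r0, executes the "trap #" SYSCALL_VECTOR instruction, and turns a
+ * negative return value into -1 with errno set via __syscall_return().
+ */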
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT /* will be unused */
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#endif
+
+#ifdef __KERNEL_SYSCALLS__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+static __inline__ _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+			  unsigned long prot, unsigned long flags,
+			  unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_execve(struct pt_regs regs);
+asmlinkage int sys_clone(struct pt_regs regs);
+asmlinkage int sys_fork(struct pt_regs regs);
+asmlinkage int sys_vfork(struct pt_regs regs);
+asmlinkage int sys_pipe(unsigned long __user *fildes);
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
+struct sigaction;
+asmlinkage long sys_rt_sigaction(int sig,
+				 const struct sigaction __user *act,
+				 struct sigaction __user *oact,
+				 size_t sigsetsize);
+
+#endif /* __KERNEL_SYSCALLS__ */
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif
+
+#endif /* _ASM_M32R_UNISTD_H */
diff --git a/include/asm-m32r/user.h b/include/asm-m32r/user.h
new file mode 100644
index 0000000..2ffd0c6
--- /dev/null
+++ b/include/asm-m32r/user.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_M32R_USER_H
+#define _ASM_M32R_USER_H
+
+/* $Id$ */
+
+/* orig : sh 2.4.18
+ * mod  : remove fpu registers
+ */
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd).
+ *
+ * The actual file contents are as follows:
+ * UPAGE: 1 page consisting of a user struct that tells gdb
+ *	what is present in the file.  Directly after this is a
+ *	copy of the task_struct, which is currently not used by gdb,
+ *	but it may come in handy at some point.  All of the registers
+ *	are stored as part of the upage.  The upage should always be
+ *	only one page.
+ * DATA: The data area is stored.  We use current->end_text to
+ *	current->brk to pick up all of the user variables, plus any memory
+ *	that may have been sbrk'ed.  No attempt is made to determine if a
+ *	page is demand-zero or if a page is totally unused, we just cover
+ *	the entire range.  All of the addresses are rounded in such a way
+ *	that an integral number of pages is written.
+ * STACK: We need the stack information in order to get a meaningful
+ *	backtrace.  We need to write the data from usp to
+ *	current->start_stack, so we round each of these off in order to be
+ *	able to write an integer number of pages.
+ */
+
+struct user {
+	struct pt_regs	regs;			/* entire machine state */
+	size_t		u_tsize;		/* text size (pages) */
+	size_t		u_dsize;		/* data size (pages) */
+	size_t		u_ssize;		/* stack size (pages) */
+	unsigned long	start_code;		/* text starting address */
+	unsigned long	start_data;		/* data starting address */
+	unsigned long	start_stack;		/* stack starting address */
+	long int	signal;			/* signal causing core dump */
+	struct regs *	u_ar0;			/* help gdb find registers */
+	unsigned long	magic;			/* identifies a core file */
+	char		u_comm[32];		/* user command name */
+};
+
+#define NBPG			PAGE_SIZE
+#define UPAGES			1
+#define HOST_TEXT_START_ADDR	(u.start_code)
+#define HOST_DATA_START_ADDR	(u.start_data)
+#define HOST_STACK_END_ADDR	(u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_M32R_USER_H */
diff --git a/include/asm-m32r/vga.h b/include/asm-m32r/vga.h
new file mode 100644
index 0000000..d0f4b6e
--- /dev/null
+++ b/include/asm-m32r/vga.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_M32R_VGA_H
+#define _ASM_M32R_VGA_H
+
+/* $Id$ */
+
+/*
+ *	Access to VGA videoram
+ *
+ *	(c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+/*
+ *	On the PC, we can just recalculate addresses and then
+ *	access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif  /* _ASM_M32R_VGA_H */
diff --git a/include/asm-m32r/xor.h b/include/asm-m32r/xor.h
new file mode 100644
index 0000000..fd960dc
--- /dev/null
+++ b/include/asm-m32r/xor.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_M32R_XOR_H
+#define _ASM_M32R_XOR_H
+
+/* $Id$ */
+
+#include <asm-generic/xor.h>
+
+#endif  /* _ASM_M32R_XOR_H */