Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/include/asm-alpha/8253pit.h b/include/asm-alpha/8253pit.h
new file mode 100644
index 0000000..fef5c14
--- /dev/null
+++ b/include/asm-alpha/8253pit.h
@@ -0,0 +1,10 @@
+/*
+ * 8253/8254 Programmable Interval Timer
+ */
+
+#ifndef _8253PIT_H
+#define _8253PIT_H
+
+#define PIT_TICK_RATE 	1193180UL
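+
+/*
+ * A sketch of how this constant is typically used (the LATCH macro
+ * here is an assumption for illustration, not defined by this
+ * header): the divisor programmed into the 8254 for a HZ-rate clock
+ * interrupt is
+ *
+ *	#define LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
+ *
+ * e.g. 1193180 / 1024 is roughly 1165 for HZ == 1024.
+ */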
+
+#endif
diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h
new file mode 100644
index 0000000..d97daf4
--- /dev/null
+++ b/include/asm-alpha/a.out.h
@@ -0,0 +1,106 @@
+#ifndef __ALPHA_A_OUT_H__
+#define __ALPHA_A_OUT_H__
+
+#include <linux/types.h>
+
+/*
+ * OSF/1 ECOFF header structs.  ECOFF files consist of:
+ * 	- a file header (struct filehdr),
+ *	- an a.out header (struct aouthdr),
+ *	- one or more section headers (struct scnhdr). 
+ *	  The filehdr's "f_nscns" field contains the
+ *	  number of section headers.
+ */
+
+struct filehdr
+{
+	/* OSF/1 "file" header */
+	__u16 f_magic, f_nscns;
+	__u32 f_timdat;
+	__u64 f_symptr;
+	__u32 f_nsyms;
+	__u16 f_opthdr, f_flags;
+};
+
+struct aouthdr
+{
+	__u64 info;		/* after that it looks quite normal.. */
+	__u64 tsize;
+	__u64 dsize;
+	__u64 bsize;
+	__u64 entry;
+	__u64 text_start;	/* with a few additions that actually make sense */
+	__u64 data_start;
+	__u64 bss_start;
+	__u32 gprmask, fprmask;	/* bitmask of general & floating point regs used in binary */
+	__u64 gpvalue;
+};
+
+struct scnhdr
+{
+	char	s_name[8];
+	__u64	s_paddr;
+	__u64	s_vaddr;
+	__u64	s_size;
+	__u64	s_scnptr;
+	__u64	s_relptr;
+	__u64	s_lnnoptr;
+	__u16	s_nreloc;
+	__u16	s_nlnno;
+	__u32	s_flags;
+};
+
+struct exec
+{
+	/* OSF/1 "file" header */
+	struct filehdr		fh;
+	struct aouthdr		ah;
+};
+
+/*
+ * Defines so that the kernel exec code can access the a.out header
+ * fields...
+ */
+#define	a_info		ah.info
+#define	a_text		ah.tsize
+#define a_data		ah.dsize
+#define a_bss		ah.bsize
+#define a_entry		ah.entry
+#define a_textstart	ah.text_start
+#define	a_datastart	ah.data_start
+#define	a_bssstart	ah.bss_start
+#define	a_gprmask	ah.gprmask
+#define a_fprmask	ah.fprmask
+#define a_gpvalue	ah.gpvalue
+
+#define N_TXTADDR(x) ((x).a_textstart)
+#define N_DATADDR(x) ((x).a_datastart)
+#define N_BSSADDR(x) ((x).a_bssstart)
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+
+#define AOUTHSZ		sizeof(struct aouthdr)
+#define SCNHSZ		sizeof(struct scnhdr)
+#define SCNROUND	16
+
+#define N_TXTOFF(x) \
+  ((long) N_MAGIC(x) == ZMAGIC ? 0 : \
+   (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))
+
+#ifdef __KERNEL__
+
+/* Assume that start addresses below 4G belong to a TASO application.
+   Unfortunately, there is no proper bit in the exec header to check.
+   Worse, we have to notice the start address before swapping to use
+   /sbin/loader, which of course is _not_ a TASO application.  */
+#define SET_AOUT_PERSONALITY(BFPM, EX) \
+	set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \
+			   ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
+
+#define STACK_TOP \
+  (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+
+#endif
+
+#endif /* __ALPHA_A_OUT_H__ */
diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h
new file mode 100644
index 0000000..c99dbbb
--- /dev/null
+++ b/include/asm-alpha/agp.h
@@ -0,0 +1,13 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/io.h>
+
+/* dummy for now */
+
+#define map_page_into_agp(page) 
+#define unmap_page_from_agp(page) 
+#define flush_agp_mappings() 
+#define flush_agp_cache() mb()
+
+#endif
diff --git a/include/asm-alpha/agp_backend.h b/include/asm-alpha/agp_backend.h
new file mode 100644
index 0000000..55dd44a
--- /dev/null
+++ b/include/asm-alpha/agp_backend.h
@@ -0,0 +1,42 @@
+#ifndef _ALPHA_AGP_BACKEND_H
+#define _ALPHA_AGP_BACKEND_H 1
+
+typedef	union _alpha_agp_mode {
+	struct {
+		u32 rate : 3;
+		u32 reserved0 : 1;
+		u32 fw : 1;
+		u32 fourgb : 1;
+		u32 reserved1 : 2;
+		u32 enable : 1;
+		u32 sba : 1;
+		u32 reserved2 : 14;
+		u32 rq : 8;
+	} bits;
+	u32 lw;
+} alpha_agp_mode;
+
+typedef struct _alpha_agp_info {
+	struct pci_controller *hose;
+	struct {
+		dma_addr_t bus_base;
+		unsigned long size;
+		void *sysdata;
+	} aperture;
+	alpha_agp_mode capability;
+	alpha_agp_mode mode;
+	void *private;
+	struct alpha_agp_ops *ops;
+} alpha_agp_info;
+
+struct alpha_agp_ops {
+	int (*setup)(alpha_agp_info *);
+	void (*cleanup)(alpha_agp_info *);
+	int (*configure)(alpha_agp_info *);
+	int (*bind)(alpha_agp_info *, off_t, struct agp_memory *);
+	int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *);
+	unsigned long (*translate)(alpha_agp_info *, dma_addr_t);
+};
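+
+/*
+ * A minimal usage sketch, assuming a hypothetical chipset driver has
+ * already filled in an alpha_agp_info *agp:
+ *
+ *	agp->mode.bits.rate = 1;	(a rate taken from agp->capability)
+ *	agp->mode.bits.enable = 1;
+ *	err = agp->ops->configure(agp);
+ *
+ * after which the aperture at agp->aperture.bus_base is live.
+ */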
+
+
+#endif /* _ALPHA_AGP_BACKEND_H */
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
new file mode 100644
index 0000000..1b383e3
--- /dev/null
+++ b/include/asm-alpha/atomic.h
@@ -0,0 +1,198 @@
+#ifndef _ALPHA_ATOMIC_H
+#define _ALPHA_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc...
+ *
+ * But use these as seldom as possible since they are much slower
+ * than regular operations.
+ */
+
+
+/*
+ * Counter is volatile to make sure gcc doesn't try to be clever
+ * and move things around on us. We need to use _exactly_ the address
+ * the user gave us, not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
+#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
+
+#define atomic_read(v)		((v)->counter + 0)
+#define atomic64_read(v)	((v)->counter + 0)
+
+#define atomic_set(v,i)		((v)->counter = (i))
+#define atomic64_set(v,i)	((v)->counter = (i))
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
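+
+/*
+ * In C terms, each primitive below behaves like this sketch, where
+ * store_conditional() is purely illustrative and stands in for the
+ * stl_c/stq_c instruction:
+ *
+ *	do {
+ *		old = v->counter;
+ *	} while (!store_conditional(&v->counter, old + i));
+ */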
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	addl %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic64_add(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic_sub(int i, atomic_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	subl %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+static __inline__ void atomic64_sub(long i, atomic64_t * v)
+{
+	unsigned long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%2,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter)
+	:"Ir" (i), "m" (v->counter));
+}
+
+
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long atomic_add_return(int i, atomic_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	addl %0,%3,%2\n"
+	"	addl %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	addq %0,%3,%2\n"
+	"	addq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+static __inline__ long atomic_sub_return(int i, atomic_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%1\n"
+	"	subl %0,%3,%2\n"
+	"	subl %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+{
+	long temp, result;
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%1\n"
+	"	subq %0,%3,%2\n"
+	"	subq %0,%3,%0\n"
+	"	stq_c %0,%1\n"
+	"	beq %0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
+	:"Ir" (i), "m" (v->counter) : "memory");
+	return result;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
+
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic64_inc_return(v) atomic64_add_return(1,(v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1,(v))
+
+#define atomic_dec(v) atomic_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1,(v))
+
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+#endif /* _ALPHA_ATOMIC_H */
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
new file mode 100644
index 0000000..578ed3f
--- /dev/null
+++ b/include/asm-alpha/bitops.h
@@ -0,0 +1,507 @@
+#ifndef _ALPHA_BITOPS_H
+#define _ALPHA_BITOPS_H
+
+#include <linux/config.h>
+#include <asm/compiler.h>
+
+/*
+ * Copyright 1994, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. The test_and_* operations return 0 if the
+ * bit was clear before the operation and != 0 if it was set.
+ *
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ *
+ * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
+ */
+
+static inline void
+set_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%3\n"
+	"	bis %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m)
+	:"Ir" (1UL << (nr & 31)), "m" (*m));
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline void
+__set_bit(unsigned long nr, volatile void * addr)
+{
+	int *m = ((int *) addr) + (nr >> 5);
+
+	*m |= 1 << (nr & 31);
+}
+
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+static inline void
+clear_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%3\n"
+	"	bic %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m)
+	:"Ir" (1UL << (nr & 31)), "m" (*m));
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ void
+__clear_bit(unsigned long nr, volatile void * addr)
+{
+	int *m = ((int *) addr) + (nr >> 5);
+
+	*m &= ~(1 << (nr & 31));
+}
+
+static inline void
+change_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%3\n"
+	"	xor %0,%2,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m)
+	:"Ir" (1UL << (nr & 31)), "m" (*m));
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ void
+__change_bit(unsigned long nr, volatile void * addr)
+{
+	int *m = ((int *) addr) + (nr >> 5);
+
+	*m ^= 1 << (nr & 31);
+}
+
+static inline int
+test_and_set_bit(unsigned long nr, volatile void *addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	bne %2,2f\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+	"2:\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline int
+__test_and_set_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *) addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old | mask;
+	return (old & mask) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	beq %2,2f\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+	"2:\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *) addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *) addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old ^ mask;
+	return (old & mask) != 0;
+}
+
+static inline int
+test_bit(int nr, const volatile void * addr)
+{
+	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ *
+ * Do a binary search on the bits.  Due to the nature of large
+ * constants on the alpha, it is worthwhile to split the search.
+ */
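+
+/*
+ * A worked example for ffz_b below: with x == 0xE7 (11100111),
+ * ~x & -~x isolates bit 3, so x1 == 0x08, x2 == 0x08, x4 == 0 and
+ * the sum is 2 + 0 + 1 == 3, i.e. the first zero bit of 0xE7 is bit 3.
+ */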
+static inline unsigned long ffz_b(unsigned long x)
+{
+	unsigned long sum, x1, x2, x4;
+
+	x = ~x & -~x;		/* set first 0 bit, clear others */
+	x1 = x & 0xAA;
+	x2 = x & 0xCC;
+	x4 = x & 0xF0;
+	sum = x2 ? 2 : 0;
+	sum += (x4 != 0) * 4;
+	sum += (x1 != 0);
+
+	return sum;
+}
+
+static inline unsigned long ffz(unsigned long word)
+{
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+	/* Whee.  EV67 can calculate it directly.  */
+	return __kernel_cttz(~word);
+#else
+	unsigned long bits, qofs, bofs;
+
+	bits = __kernel_cmpbge(word, ~0UL);
+	qofs = ffz_b(bits);
+	bits = __kernel_extbl(word, qofs);
+	bofs = ffz_b(bits);
+
+	return qofs*8 + bofs;
+#endif
+}
+
+/*
+ * __ffs = Find First set bit in word.  Undefined if no set bit exists.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+	/* Whee.  EV67 can calculate it directly.  */
+	return __kernel_cttz(word);
+#else
+	unsigned long bits, qofs, bofs;
+
+	bits = __kernel_cmpbge(0, word);
+	qofs = ffz_b(bits);
+	bits = __kernel_extbl(word, qofs);
+	bofs = ffz_b(~bits);
+
+	return qofs*8 + bofs;
+#endif
+}
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above __ffs.
+ */
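+
+/*
+ * E.g. ffs(0) == 0 while ffs(0x8) == 4, one more than __ffs(0x8) == 3.
+ */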
+
+static inline int ffs(int word)
+{
+	int result = __ffs(word) + 1;
+	return word ? result : 0;
+}
+
+/*
+ * fls: find last bit set.
+ */
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+static inline int fls(int word)
+{
+	return 64 - __kernel_ctlz(word & 0xffffffff);
+}
+#else
+#define fls	generic_fls
+#endif
+
+/* Compute powers of two for the given integer.  */
+static inline long floor_log2(unsigned long word)
+{
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+	return 63 - __kernel_ctlz(word);
+#else
+	long bit;
+	for (bit = -1; word ; bit++)
+		word >>= 1;
+	return bit;
+#endif
+}
+
+static inline long ceil_log2(unsigned long word)
+{
+	long bit = floor_log2(word);
+	return bit + (word > (1UL << bit));
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+/* Whee.  EV67 can calculate it directly.  */
+static inline unsigned long hweight64(unsigned long w)
+{
+	return __kernel_ctpop(w);
+}
+
+#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
+#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
+#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)
+#else
+static inline unsigned long hweight64(unsigned long w)
+{
+	unsigned long result;
+	for (result = 0; w ; w >>= 1)
+		result += (w & 1);
+	return result;
+}
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x)  generic_hweight8(x)
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * Find next zero bit in a bitmap reasonably efficiently..
+ */
+static inline unsigned long
+find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr;
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	p += offset >> 6;
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (64-offset);
+		if (size < 64)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+ found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)        /* Are any bits zero? */
+		return result + size; /* Nope. */
+ found_middle:
+	return result + ffz(tmp);
+}
+
+/*
+ * Find next one bit in a bitmap reasonably efficiently.
+ */
+static inline unsigned long
+find_next_bit(const void * addr, unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr;
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	p += offset >> 6;
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= ~0UL << offset;
+		if (size < 64)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+ found_first:
+	tmp &= ~0UL >> (64 - size);
+	if (!tmp)
+		return result + size;
+ found_middle:
+	return result + __ffs(tmp);
+}
+
+/*
+ * The optimizer actually generates good code for this case.
+ */
+#define find_first_zero_bit(addr, size) \
+	find_next_zero_bit((addr), (size), 0)
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)
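+
+/*
+ * For example, given a two-quadword bitmap { 0, 0x10 },
+ * find_first_bit(map, 128) returns 64 + __ffs(0x10) == 68.
+ */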
+
+#ifdef __KERNEL__
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
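+
+/*
+ * For example, with b[0] == 0 and b[1] == 0x8, ofs resolves to 64
+ * and __ffs(0x8) == 3, giving 67.
+ */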
+static inline unsigned long
+sched_find_first_bit(unsigned long b[3])
+{
+	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
+	unsigned long ofs;
+
+	ofs = (b1 ? 64 : 128);
+	b1 = (b1 ? b1 : b2);
+	ofs = (b0 ? 0 : ofs);
+	b0 = (b0 ? b0 : b1);
+
+	return __ffs(b0) + ofs;
+}
+
+
+#define ext2_set_bit                 __test_and_set_bit
+#define ext2_set_bit_atomic(l,n,a)   test_and_set_bit(n,a)
+#define ext2_clear_bit               __test_and_clear_bit
+#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+#define ext2_test_bit                test_bit
+#define ext2_find_first_zero_bit     find_first_zero_bit
+#define ext2_find_next_zero_bit      find_next_zero_bit
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ALPHA_BITOPS_H */
diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h
new file mode 100644
index 0000000..ae1e0a5
--- /dev/null
+++ b/include/asm-alpha/bug.h
@@ -0,0 +1,15 @@
+#ifndef _ALPHA_BUG_H
+#define _ALPHA_BUG_H
+
+#include <asm/pal.h>
+
+/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
+   function loaded the GP, so this could fail in modules.  */
+#define BUG() \
+  __asm__ __volatile__("call_pal %0  # bugchk\n\t"".long %1\n\t.8byte %2" \
+		       : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
+
+#define HAVE_ARCH_BUG
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/include/asm-alpha/bugs.h b/include/asm-alpha/bugs.h
new file mode 100644
index 0000000..78030d1
--- /dev/null
+++ b/include/asm-alpha/bugs.h
@@ -0,0 +1,20 @@
+/*
+ *  include/asm-alpha/bugs.h
+ *
+ *  Copyright (C) 1994  Linus Torvalds
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ */
+
+/*
+ * I don't know of any alpha bugs yet... Nice chip.
+ */
+
+static void check_bugs(void)
+{
+}
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
new file mode 100644
index 0000000..7af2b8d
--- /dev/null
+++ b/include/asm-alpha/byteorder.h
@@ -0,0 +1,47 @@
+#ifndef _ALPHA_BYTEORDER_H
+#define _ALPHA_BYTEORDER_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+#include <asm/compiler.h>
+
+#ifdef __GNUC__
+
+static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
+{
+	/*
+	 * Unfortunately, we can't use the 6 instruction sequence
+	 * on ev6 since the latency of the UNPKBW is 3, which is
+	 * pretty hard to hide.  Just in case a future implementation
+	 * has a lower latency, here's the sequence (also by Mike Burrows)
+	 *
+	 * UNPKBW a0, v0       v0: 00AA00BB00CC00DD
+	 * SLL v0, 24, a0      a0: BB00CC00DD000000
+	 * BIS v0, a0, a0      a0: BBAACCBBDDCC00DD
+	 * EXTWL a0, 6, v0     v0: 000000000000BBAA
+	 * ZAP a0, 0xf3, a0    a0: 00000000DDCC0000
+	 * ADDL a0, v0, v0     v0: ssssssssDDCCBBAA
+	 */
+
+	__u64 t0, t1, t2, t3;
+
+	t0 = __kernel_inslh(x, 7);	/* t0 : 0000000000AABBCC */
+	t1 = __kernel_inswl(x, 3);	/* t1 : 000000CCDD000000 */
+	t1 |= t0;			/* t1 : 000000CCDDAABBCC */
+	t2 = t1 >> 16;			/* t2 : 0000000000CCDDAA */
+	t0 = t1 & 0xFF00FF00;		/* t0 : 00000000DD00BB00 */
+	t3 = t2 & 0x00FF00FF;		/* t3 : 0000000000CC00AA */
+	t1 = t0 + t3;			/* t1 : ssssssssDDCCBBAA */
+
+	return t1;
+}
+
+#define __arch__swab32 __arch__swab32
+
+#endif /* __GNUC__ */
+
+#define __BYTEORDER_HAS_U64__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ALPHA_BYTEORDER_H */
diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h
new file mode 100644
index 0000000..e69b295
--- /dev/null
+++ b/include/asm-alpha/cache.h
@@ -0,0 +1,25 @@
+/*
+ * include/asm-alpha/cache.h
+ */
+#ifndef __ARCH_ALPHA_CACHE_H
+#define __ARCH_ALPHA_CACHE_H
+
+#include <linux/config.h>
+
+/* Bytes per L1 (data) cache line. */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+# define L1_CACHE_BYTES     64
+# define L1_CACHE_SHIFT     6
+#else
+/* Both EV4 and EV5 are write-through, read-allocate,
+   direct-mapped, physical.
+*/
+# define L1_CACHE_BYTES     32
+# define L1_CACHE_SHIFT     5
+#endif
+
+#define L1_CACHE_ALIGN(x)  (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#define SMP_CACHE_BYTES    L1_CACHE_BYTES
+#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
+
+#endif
diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h
new file mode 100644
index 0000000..3fc6ef7
--- /dev/null
+++ b/include/asm-alpha/cacheflush.h
@@ -0,0 +1,74 @@
+#ifndef _ALPHA_CACHEFLUSH_H
+#define _ALPHA_CACHEFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+/* Caches aren't brain-dead on the Alpha. */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+/* Note that the following two definitions are _highly_ dependent
+   on the contexts in which they are used in the kernel.  I personally
+   think it is criminal how loosely defined these macros are.  */
+
+/* We need to flush the kernel's icache after loading modules.  The
+   only other use of this macro is in load_aout_interp which is not
+   used on Alpha. 
+
+   Note that this definition should *not* be used for userspace
+   icache flushing.  While functional, it is _way_ overkill.  The
+   icache is tagged with ASNs and it suffices to allocate a new ASN
+   for the process.  */
+#ifndef CONFIG_SMP
+#define flush_icache_range(start, end)		imb()
+#else
+#define flush_icache_range(start, end)		smp_imb()
+extern void smp_imb(void);
+#endif
+
+/* We need to flush the userspace icache after setting breakpoints in
+   ptrace.
+
+   Instead of indiscriminately using imb, take advantage of the fact
+   that icache entries are tagged with the ASN and load a new mm context.  */
+/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */
+
+#ifndef CONFIG_SMP
+extern void __load_new_mm_context(struct mm_struct *);
+static inline void
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			unsigned long addr, int len)
+{
+	if (vma->vm_flags & VM_EXEC) {
+		struct mm_struct *mm = vma->vm_mm;
+		if (current->active_mm == mm)
+			__load_new_mm_context(mm);
+		else
+			mm->context[smp_processor_id()] = 0;
+	}
+}
+#else
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+		struct page *page, unsigned long addr, int len);
+#endif
+
+/* This is used only in do_no_page and do_swap_page.  */
+#define flush_icache_page(vma, page) \
+  flush_icache_user_range((vma), (page), 0, 0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+     flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* _ALPHA_CACHEFLUSH_H */
diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h
new file mode 100644
index 0000000..a5c9f08
--- /dev/null
+++ b/include/asm-alpha/checksum.h
@@ -0,0 +1,77 @@
+#ifndef _ALPHA_CHECKSUM_H
+#define _ALPHA_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
+					   unsigned long daddr,
+					   unsigned short len,
+					   unsigned short proto,
+					   unsigned int sum);
+
+unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+				unsigned short len, unsigned short proto,
+				unsigned int sum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+unsigned int csum_partial_copy_from_user(const char __user *src, char *dst, int len, unsigned int sum, int *errp);
+
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
+
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+
+/*
+ *	Fold a partial checksum without adding pseudo headers
+ */
+
+static inline unsigned short csum_fold(unsigned int sum)
+{
+	sum = (sum & 0xffff) + (sum >> 16);
+	sum = (sum & 0xffff) + (sum >> 16);
+	return ~sum;
+}
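+
+/*
+ * A worked example: csum_fold(0x1A2B3C4D) adds 0x1A2B + 0x3C4D == 0x5678,
+ * the second fold leaves that unchanged, and the complement truncates to
+ * the 16-bit checksum 0xA987.
+ */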
+
+#define _HAVE_ARCH_IPV6_CSUM
+extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+                                          struct in6_addr *daddr,
+                                          __u32 len,
+                                          unsigned short proto,
+                                          unsigned int sum);
+
+#endif
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
new file mode 100644
index 0000000..399c33b
--- /dev/null
+++ b/include/asm-alpha/compiler.h
@@ -0,0 +1,103 @@
+#ifndef __ALPHA_COMPILER_H
+#define __ALPHA_COMPILER_H
+
+/* 
+ * Herein are macros we use when describing various patterns we want GCC to emit.
+ * In all cases we can get better schedules out of the compiler if we hide
+ * as little as possible inside inline assembly.  However, we want to be
+ * able to know what we'll get out before giving up inline assembly.  Thus
+ * these tests and macros.
+ */
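+
+/*
+ * A sketch of the byte-manipulation semantics assumed below:
+ * __kernel_extbl(x, n) extracts byte n of x, so
+ * __kernel_extbl(0x1122334455667788, 2) yields 0x66, while
+ * __kernel_insbl(b, n) deposits the low byte of b at byte n, so
+ * __kernel_insbl(0xAB, 2) yields 0x0000000000AB0000.
+ */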
+
+#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_insbl(val, shift)	__builtin_alpha_insbl(val, shift)
+# define __kernel_inswl(val, shift)	__builtin_alpha_inswl(val, shift)
+# define __kernel_insql(val, shift)	__builtin_alpha_insql(val, shift)
+# define __kernel_inslh(val, shift)	__builtin_alpha_inslh(val, shift)
+# define __kernel_extbl(val, shift)	__builtin_alpha_extbl(val, shift)
+# define __kernel_extwl(val, shift)	__builtin_alpha_extwl(val, shift)
+# define __kernel_cmpbge(a, b)		__builtin_alpha_cmpbge(a, b)
+# define __kernel_cttz(x)		__builtin_ctzl(x)
+# define __kernel_ctlz(x)		__builtin_clzl(x)
+# define __kernel_ctpop(x)		__builtin_popcountl(x)
+#else
+# define __kernel_insbl(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_inswl(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_insql(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_inslh(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_extbl(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_extwl(val, shift)					\
+  ({ unsigned long __kir;						\
+     __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val));	\
+     __kir; })
+# define __kernel_cmpbge(a, b)						\
+  ({ unsigned long __kir;						\
+     __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a));	\
+     __kir; })
+# define __kernel_cttz(x)						\
+  ({ unsigned long __kir;						\
+     __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x));			\
+     __kir; })
+# define __kernel_ctlz(x)						\
+  ({ unsigned long __kir;						\
+     __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x));			\
+     __kir; })
+# define __kernel_ctpop(x)						\
+  ({ unsigned long __kir;						\
+     __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x));			\
+     __kir; })
+#endif
+
+
+/* 
+ * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX 
+ * extension is enabled.  Previous versions did not define anything
+ * we could test during compilation -- too bad, so sad.
+ */
+
+#if defined(__alpha_bwx__)
+#define __kernel_ldbu(mem)	(mem)
+#define __kernel_ldwu(mem)	(mem)
+#define __kernel_stb(val,mem)	((mem) = (val))
+#define __kernel_stw(val,mem)	((mem) = (val))
+#else
+#define __kernel_ldbu(mem)				\
+  ({ unsigned char __kir;				\
+     __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem));	\
+     __kir; })
+#define __kernel_ldwu(mem)				\
+  ({ unsigned short __kir;				\
+     __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem));	\
+     __kir; })
+#define __kernel_stb(val,mem) \
+  __asm__("stb %1,%0" : "=m"(mem) : "r"(val))
+#define __kernel_stw(val,mem) \
+  __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
+#endif
+
+/* Some idiots over in <linux/compiler.h> thought inline should imply
+   always_inline.  This breaks stuff.  We'll include this file whenever
+   we run into such problems.  */
+
+#include <linux/compiler.h>
+#undef inline
+#undef __inline__
+#undef __inline
+
+
+#endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-alpha/console.h b/include/asm-alpha/console.h
new file mode 100644
index 0000000..a3ce4e6
--- /dev/null
+++ b/include/asm-alpha/console.h
@@ -0,0 +1,75 @@
+#ifndef __AXP_CONSOLE_H
+#define __AXP_CONSOLE_H
+
+/*
+ * Console callback routine numbers
+ */
+#define CCB_GETC		0x01
+#define CCB_PUTS		0x02
+#define CCB_RESET_TERM		0x03
+#define CCB_SET_TERM_INT	0x04
+#define CCB_SET_TERM_CTL	0x05
+#define CCB_PROCESS_KEYCODE	0x06
+#define CCB_OPEN_CONSOLE	0x07
+#define CCB_CLOSE_CONSOLE	0x08
+
+#define CCB_OPEN		0x10
+#define CCB_CLOSE		0x11
+#define CCB_IOCTL		0x12
+#define CCB_READ		0x13
+#define CCB_WRITE		0x14
+
+#define CCB_SET_ENV		0x20
+#define CCB_RESET_ENV		0x21
+#define CCB_GET_ENV		0x22
+#define CCB_SAVE_ENV		0x23
+
+#define CCB_PSWITCH		0x30
+#define CCB_BIOS_EMUL		0x32
+
+/*
+ * Environment variable numbers
+ */
+#define ENV_AUTO_ACTION		0x01
+#define ENV_BOOT_DEV		0x02
+#define ENV_BOOTDEF_DEV		0x03
+#define ENV_BOOTED_DEV		0x04
+#define ENV_BOOT_FILE		0x05
+#define ENV_BOOTED_FILE		0x06
+#define ENV_BOOT_OSFLAGS	0x07
+#define ENV_BOOTED_OSFLAGS	0x08
+#define ENV_BOOT_RESET		0x09
+#define ENV_DUMP_DEV		0x0A
+#define ENV_ENABLE_AUDIT	0x0B
+#define ENV_LICENSE		0x0C
+#define ENV_CHAR_SET		0x0D
+#define ENV_LANGUAGE		0x0E
+#define ENV_TTY_DEV		0x0F
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+extern long callback_puts(long unit, const char *s, long length);
+extern long callback_getc(long unit);
+extern long callback_open_console(void);
+extern long callback_close_console(void);
+extern long callback_open(const char *device, long length);
+extern long callback_close(long unit);
+extern long callback_read(long channel, long count, const char *buf, long lbn);
+extern long callback_getenv(long id, const char *buf, unsigned long buf_size);
+extern long callback_setenv(long id, const char *buf, unsigned long buf_size);
+extern long callback_save_env(void);
+
+extern int srm_fixup(unsigned long new_callback_addr,
+		     unsigned long new_hwrpb_addr);
+extern long srm_puts(const char *, long);
+extern long srm_printk(const char *, ...)
+	__attribute__ ((format (printf, 1, 2)));
+
+struct crb_struct;
+struct hwrpb_struct;
+extern int callback_init_done;
+extern void * callback_init(void *);
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* __AXP_CONSOLE_H */
diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h
new file mode 100644
index 0000000..6785ff7
--- /dev/null
+++ b/include/asm-alpha/core_apecs.h
@@ -0,0 +1,517 @@
+#ifndef __ALPHA_APECS__H__
+#define __ALPHA_APECS__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * APECS is the internal name for the 2107x chipset which provides
+ * memory controller and PCI access for the 21064 chip based systems.
+ *
+ * This file is based on:
+ *
+ * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
+ * Data Sheet
+ *
+ * EC-N0648-72
+ *
+ *
+ * david.rusling@reo.mts.dec.com Initial Version.
+ *
+ */
+
+/*
+   An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
+   that get passed through the PCI<->ISA bridge chip. So we've gotta use
+   both windows to max out the physical memory we can DMA to. Sigh...
+
+   If we try a window at 0 for 1GB as a work-around, we run into conflicts
+   with ISA/PCI bus memory which can't be relocated, like VGA aperture and
+   BIOS ROMs. So we must put the windows high enough to avoid these areas.
+
+   We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
+   and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
+   Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
+   be used for that range (via virt_to_bus()).
+
+   Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
+   to keep virt_to_bus() from returning an address in the first window, for
+   a data area that goes beyond the 64Mb first DMA window.  Sigh...
+   The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
+   we can't just use that here, because of header file looping... :-(
+
+   Window 1 will be used for all DMA from the ISA bus; yes, that does
+   limit what memory an ISA floppy or sound card or Ethernet can touch, but
+   it's also a known limitation on other platforms as well. We use the
+   same technique that is used on INTEL platforms with similar limitation:
+   set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
+   We trust that any ISA bus device drivers will *always* ask for DMAable
+   memory explicitly via kmalloc()/get_free_pages() flags arguments.
+
+   Note that most PCI bus devices' drivers do *not* explicitly ask for
+   DMAable memory; they count on being able to DMA to any memory they
+   get from kmalloc()/get_free_pages(). They will also use window 1 for
+   any physical memory accesses below 64Mb; the rest will be handled by
+   window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
+
+   We hope that the area before the first window is large enough so that
+   there will be no overlap at the top end (64Mb). We *must* locate the
+   PCI cards' memory just below window 1, so that there's still the
+   possibility of being able to access it via SPARSE space. This is
+   important for cards such as the Matrox Millennium, whose Xserver
+   wants to access memory-mapped registers in byte and short lengths.
+
+   Note that the XL is treated differently from the AVANTI, even though
+   for most other things they are identical. It didn't seem reasonable to
+   make the AVANTI support pay for the limitations of the XL. It is true,
+   however, that an XL kernel will run on an AVANTI without problems.
+
+   %%% All of this should be obviated by the ability to route
+   everything through the iommu.
+*/
+
+/*
+ * 21071-DA Control and Status registers.
+ * These are used for PCI memory access.
+ */
+#define APECS_IOC_DCSR                  (IDENT_ADDR + 0x1A0000000UL)
+#define APECS_IOC_PEAR                  (IDENT_ADDR + 0x1A0000020UL)
+#define APECS_IOC_SEAR                  (IDENT_ADDR + 0x1A0000040UL)
+#define APECS_IOC_DR1                   (IDENT_ADDR + 0x1A0000060UL)
+#define APECS_IOC_DR2                   (IDENT_ADDR + 0x1A0000080UL)
+#define APECS_IOC_DR3                   (IDENT_ADDR + 0x1A00000A0UL)
+
+#define APECS_IOC_TB1R                  (IDENT_ADDR + 0x1A00000C0UL)
+#define APECS_IOC_TB2R                  (IDENT_ADDR + 0x1A00000E0UL)
+
+#define APECS_IOC_PB1R                  (IDENT_ADDR + 0x1A0000100UL)
+#define APECS_IOC_PB2R                  (IDENT_ADDR + 0x1A0000120UL)
+
+#define APECS_IOC_PM1R                  (IDENT_ADDR + 0x1A0000140UL)
+#define APECS_IOC_PM2R                  (IDENT_ADDR + 0x1A0000160UL)
+
+#define APECS_IOC_HAXR0                 (IDENT_ADDR + 0x1A0000180UL)
+#define APECS_IOC_HAXR1                 (IDENT_ADDR + 0x1A00001A0UL)
+#define APECS_IOC_HAXR2                 (IDENT_ADDR + 0x1A00001C0UL)
+
+#define APECS_IOC_PMLT                  (IDENT_ADDR + 0x1A00001E0UL)
+
+#define APECS_IOC_TLBTAG0               (IDENT_ADDR + 0x1A0000200UL)
+#define APECS_IOC_TLBTAG1               (IDENT_ADDR + 0x1A0000220UL)
+#define APECS_IOC_TLBTAG2               (IDENT_ADDR + 0x1A0000240UL)
+#define APECS_IOC_TLBTAG3               (IDENT_ADDR + 0x1A0000260UL)
+#define APECS_IOC_TLBTAG4               (IDENT_ADDR + 0x1A0000280UL)
+#define APECS_IOC_TLBTAG5               (IDENT_ADDR + 0x1A00002A0UL)
+#define APECS_IOC_TLBTAG6               (IDENT_ADDR + 0x1A00002C0UL)
+#define APECS_IOC_TLBTAG7               (IDENT_ADDR + 0x1A00002E0UL)
+
+#define APECS_IOC_TLBDATA0              (IDENT_ADDR + 0x1A0000300UL)
+#define APECS_IOC_TLBDATA1              (IDENT_ADDR + 0x1A0000320UL)
+#define APECS_IOC_TLBDATA2              (IDENT_ADDR + 0x1A0000340UL)
+#define APECS_IOC_TLBDATA3              (IDENT_ADDR + 0x1A0000360UL)
+#define APECS_IOC_TLBDATA4              (IDENT_ADDR + 0x1A0000380UL)
+#define APECS_IOC_TLBDATA5              (IDENT_ADDR + 0x1A00003A0UL)
+#define APECS_IOC_TLBDATA6              (IDENT_ADDR + 0x1A00003C0UL)
+#define APECS_IOC_TLBDATA7              (IDENT_ADDR + 0x1A00003E0UL)
+
+#define APECS_IOC_TBIA                  (IDENT_ADDR + 0x1A0000400UL)
+
+
+/*
+ * 21071-CA Control and Status registers.
+ * These are used to program memory timing,
+ *  configure memory and initialise the B-Cache.
+ */
+#define APECS_MEM_GCR		        (IDENT_ADDR + 0x180000000UL)
+#define APECS_MEM_EDSR		        (IDENT_ADDR + 0x180000040UL)
+#define APECS_MEM_TAR  		        (IDENT_ADDR + 0x180000060UL)
+#define APECS_MEM_ELAR		        (IDENT_ADDR + 0x180000080UL)
+#define APECS_MEM_EHAR  		(IDENT_ADDR + 0x1800000a0UL)
+#define APECS_MEM_SFT_RST		(IDENT_ADDR + 0x1800000c0UL)
+#define APECS_MEM_LDxLAR 		(IDENT_ADDR + 0x1800000e0UL)
+#define APECS_MEM_LDxHAR 		(IDENT_ADDR + 0x180000100UL)
+#define APECS_MEM_GTR    		(IDENT_ADDR + 0x180000200UL)
+#define APECS_MEM_RTR    		(IDENT_ADDR + 0x180000220UL)
+#define APECS_MEM_VFPR   		(IDENT_ADDR + 0x180000240UL)
+#define APECS_MEM_PDLDR  		(IDENT_ADDR + 0x180000260UL)
+#define APECS_MEM_PDhDR  		(IDENT_ADDR + 0x180000280UL)
+
+/* Bank x Base Address Register */
+#define APECS_MEM_B0BAR  		(IDENT_ADDR + 0x180000800UL)
+#define APECS_MEM_B1BAR  		(IDENT_ADDR + 0x180000820UL)
+#define APECS_MEM_B2BAR  		(IDENT_ADDR + 0x180000840UL)
+#define APECS_MEM_B3BAR  		(IDENT_ADDR + 0x180000860UL)
+#define APECS_MEM_B4BAR  		(IDENT_ADDR + 0x180000880UL)
+#define APECS_MEM_B5BAR  		(IDENT_ADDR + 0x1800008A0UL)
+#define APECS_MEM_B6BAR  		(IDENT_ADDR + 0x1800008C0UL)
+#define APECS_MEM_B7BAR  		(IDENT_ADDR + 0x1800008E0UL)
+#define APECS_MEM_B8BAR  		(IDENT_ADDR + 0x180000900UL)
+
+/* Bank x Configuration Register */
+#define APECS_MEM_B0BCR  		(IDENT_ADDR + 0x180000A00UL)
+#define APECS_MEM_B1BCR  		(IDENT_ADDR + 0x180000A20UL)
+#define APECS_MEM_B2BCR  		(IDENT_ADDR + 0x180000A40UL)
+#define APECS_MEM_B3BCR  		(IDENT_ADDR + 0x180000A60UL)
+#define APECS_MEM_B4BCR  		(IDENT_ADDR + 0x180000A80UL)
+#define APECS_MEM_B5BCR  		(IDENT_ADDR + 0x180000AA0UL)
+#define APECS_MEM_B6BCR  		(IDENT_ADDR + 0x180000AC0UL)
+#define APECS_MEM_B7BCR  		(IDENT_ADDR + 0x180000AE0UL)
+#define APECS_MEM_B8BCR  		(IDENT_ADDR + 0x180000B00UL)
+
+/* Bank x Timing Register A */
+#define APECS_MEM_B0TRA  		(IDENT_ADDR + 0x180000C00UL)
+#define APECS_MEM_B1TRA  		(IDENT_ADDR + 0x180000C20UL)
+#define APECS_MEM_B2TRA  		(IDENT_ADDR + 0x180000C40UL)
+#define APECS_MEM_B3TRA  		(IDENT_ADDR + 0x180000C60UL)
+#define APECS_MEM_B4TRA  		(IDENT_ADDR + 0x180000C80UL)
+#define APECS_MEM_B5TRA  		(IDENT_ADDR + 0x180000CA0UL)
+#define APECS_MEM_B6TRA  		(IDENT_ADDR + 0x180000CC0UL)
+#define APECS_MEM_B7TRA  		(IDENT_ADDR + 0x180000CE0UL)
+#define APECS_MEM_B8TRA  		(IDENT_ADDR + 0x180000D00UL)
+
+/* Bank x Timing Register B */
+#define APECS_MEM_B0TRB                 (IDENT_ADDR + 0x180000E00UL)
+#define APECS_MEM_B1TRB  		(IDENT_ADDR + 0x180000E20UL)
+#define APECS_MEM_B2TRB  		(IDENT_ADDR + 0x180000E40UL)
+#define APECS_MEM_B3TRB  		(IDENT_ADDR + 0x180000E60UL)
+#define APECS_MEM_B4TRB  		(IDENT_ADDR + 0x180000E80UL)
+#define APECS_MEM_B5TRB  		(IDENT_ADDR + 0x180000EA0UL)
+#define APECS_MEM_B6TRB  		(IDENT_ADDR + 0x180000EC0UL)
+#define APECS_MEM_B7TRB  		(IDENT_ADDR + 0x180000EE0UL)
+#define APECS_MEM_B8TRB  		(IDENT_ADDR + 0x180000F00UL)
+
+
+/*
+ * Memory spaces:
+ */
+#define APECS_IACK_SC		        (IDENT_ADDR + 0x1b0000000UL)
+#define APECS_CONF		        (IDENT_ADDR + 0x1e0000000UL)
+#define APECS_IO			(IDENT_ADDR + 0x1c0000000UL)
+#define APECS_SPARSE_MEM		(IDENT_ADDR + 0x200000000UL)
+#define APECS_DENSE_MEM		        (IDENT_ADDR + 0x300000000UL)
+
+
+/*
+ * Bit definitions for I/O Controller status register 0:
+ */
+#define APECS_IOC_STAT0_CMD		0xf
+#define APECS_IOC_STAT0_ERR		(1<<4)
+#define APECS_IOC_STAT0_LOST		(1<<5)
+#define APECS_IOC_STAT0_THIT		(1<<6)
+#define APECS_IOC_STAT0_TREF		(1<<7)
+#define APECS_IOC_STAT0_CODE_SHIFT	8
+#define APECS_IOC_STAT0_CODE_MASK	0x7
+#define APECS_IOC_STAT0_P_NBR_SHIFT	13
+#define APECS_IOC_STAT0_P_NBR_MASK	0x7ffff
+
+#define APECS_HAE_ADDRESS		APECS_IOC_HAXR1
+
+
+/*
+ * Data structure for handling APECS machine checks:
+ */
+
+struct el_apecs_mikasa_sysdata_mcheck
+{
+	unsigned long coma_gcr;
+	unsigned long coma_edsr;
+	unsigned long coma_ter;
+	unsigned long coma_elar;
+	unsigned long coma_ehar;
+	unsigned long coma_ldlr;
+	unsigned long coma_ldhr;
+	unsigned long coma_base0;
+	unsigned long coma_base1;
+	unsigned long coma_base2;
+	unsigned long coma_base3;
+	unsigned long coma_cnfg0;
+	unsigned long coma_cnfg1;
+	unsigned long coma_cnfg2;
+	unsigned long coma_cnfg3;
+	unsigned long epic_dcsr;
+	unsigned long epic_pear;
+	unsigned long epic_sear;
+	unsigned long epic_tbr1;
+	unsigned long epic_tbr2;
+	unsigned long epic_pbr1;
+	unsigned long epic_pbr2;
+	unsigned long epic_pmr1;
+	unsigned long epic_pmr2;
+	unsigned long epic_harx1;
+	unsigned long epic_harx2;
+	unsigned long epic_pmlt;
+	unsigned long epic_tag0;
+	unsigned long epic_tag1;
+	unsigned long epic_tag2;
+	unsigned long epic_tag3;
+	unsigned long epic_tag4;
+	unsigned long epic_tag5;
+	unsigned long epic_tag6;
+	unsigned long epic_tag7;
+	unsigned long epic_data0;
+	unsigned long epic_data1;
+	unsigned long epic_data2;
+	unsigned long epic_data3;
+	unsigned long epic_data4;
+	unsigned long epic_data5;
+	unsigned long epic_data6;
+	unsigned long epic_data7;
+
+	unsigned long pceb_vid;
+	unsigned long pceb_did;
+	unsigned long pceb_revision;
+	unsigned long pceb_command;
+	unsigned long pceb_status;
+	unsigned long pceb_latency;
+	unsigned long pceb_control;
+	unsigned long pceb_arbcon;
+	unsigned long pceb_arbpri;
+
+	unsigned long esc_id;
+	unsigned long esc_revision;
+	unsigned long esc_int0;
+	unsigned long esc_int1;
+	unsigned long esc_elcr0;
+	unsigned long esc_elcr1;
+	unsigned long esc_last_eisa;
+	unsigned long esc_nmi_stat;
+
+	unsigned long pci_ir;
+	unsigned long pci_imr;
+	unsigned long svr_mgr;
+};
+
+/* This for the normal APECS machines.  */
+struct el_apecs_sysdata_mcheck
+{
+	unsigned long coma_gcr;
+	unsigned long coma_edsr;
+	unsigned long coma_ter;
+	unsigned long coma_elar;
+	unsigned long coma_ehar;
+	unsigned long coma_ldlr;
+	unsigned long coma_ldhr;
+	unsigned long coma_base0;
+	unsigned long coma_base1;
+	unsigned long coma_base2;
+	unsigned long coma_cnfg0;
+	unsigned long coma_cnfg1;
+	unsigned long coma_cnfg2;
+	unsigned long epic_dcsr;
+	unsigned long epic_pear;
+	unsigned long epic_sear;
+	unsigned long epic_tbr1;
+	unsigned long epic_tbr2;
+	unsigned long epic_pbr1;
+	unsigned long epic_pbr2;
+	unsigned long epic_pmr1;
+	unsigned long epic_pmr2;
+	unsigned long epic_harx1;
+	unsigned long epic_harx2;
+	unsigned long epic_pmlt;
+	unsigned long epic_tag0;
+	unsigned long epic_tag1;
+	unsigned long epic_tag2;
+	unsigned long epic_tag3;
+	unsigned long epic_tag4;
+	unsigned long epic_tag5;
+	unsigned long epic_tag6;
+	unsigned long epic_tag7;
+	unsigned long epic_data0;
+	unsigned long epic_data1;
+	unsigned long epic_data2;
+	unsigned long epic_data3;
+	unsigned long epic_data4;
+	unsigned long epic_data5;
+	unsigned long epic_data6;
+	unsigned long epic_data7;
+};
+
+struct el_apecs_procdata
+{
+	unsigned long paltemp[32];  /* PAL TEMP REGS. */
+	/* EV4-specific fields */
+	unsigned long exc_addr;     /* Address of excepting instruction. */
+	unsigned long exc_sum;      /* Summary of arithmetic traps. */
+	unsigned long exc_mask;     /* Exception mask (from exc_sum). */
+	unsigned long iccsr;        /* IBox hardware enables. */
+	unsigned long pal_base;     /* Base address for PALcode. */
+	unsigned long hier;         /* Hardware Interrupt Enable. */
+	unsigned long hirr;         /* Hardware Interrupt Request. */
+	unsigned long csr;          /* D-stream fault info. */
+	unsigned long dc_stat;      /* D-cache status (ECC/Parity Err). */
+	unsigned long dc_addr;      /* EV3 Phys Addr for ECC/DPERR. */
+	unsigned long abox_ctl;     /* ABox Control Register. */
+	unsigned long biu_stat;     /* BIU Status. */
+	unsigned long biu_addr;     /* BIU Address. */
+	unsigned long biu_ctl;      /* BIU Control. */
+	unsigned long fill_syndrome;/* For correcting ECC errors. */
+	unsigned long fill_addr;    /* Cache block which was being read */
+	unsigned long va;           /* Effective VA of fault or miss. */
+	unsigned long bc_tag;       /* Backup Cache Tag Probe Results.*/
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * Unlike Jensen, the APECS machines have no concept of local
+ * I/O---everything goes over the PCI bus.
+ *
+ * There is plenty of room for optimization here.  In particular,
+ * the Alpha's insb/insw/extb/extw should be useful in moving
+ * data to/from the right byte-lanes.
+ */
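+
+/*
+ * A sketch of the sparse-space encoding the routines below rely on:
+ * a byte at bus offset A is reached through the longword at
+ * base + (A << 5), with the low two address bits selecting the byte
+ * lane via extbl/insbl.  So apecs_ioread8 on port 0x3 reads the
+ * longword at APECS_IO + (0x3 << 5) and extracts byte lane 3.
+ */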
+
+#define vip	volatile int __force *
+#define vuip	volatile unsigned int __force *
+#define vulp	volatile unsigned long __force *
+
+#define APECS_SET_HAE						\
+	do {							\
+		if (addr >= (1UL << 24)) {			\
+			unsigned long msb = addr & 0xf8000000;	\
+			addr -= msb;				\
+			set_hae(msb);				\
+		}						\
+	} while (0)
+
+__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= APECS_DENSE_MEM) {
+		addr -= APECS_DENSE_MEM;
+		APECS_SET_HAE;
+		base_and_type = APECS_SPARSE_MEM + 0x00;
+	} else {
+		addr -= APECS_IO;
+		base_and_type = APECS_IO + 0x00;
+	}
+
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= APECS_DENSE_MEM) {
+		addr -= APECS_DENSE_MEM;
+		APECS_SET_HAE;
+		base_and_type = APECS_SPARSE_MEM + 0x00;
+	} else {
+		addr -= APECS_IO;
+		base_and_type = APECS_IO + 0x00;
+	}
+
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= APECS_DENSE_MEM) {
+		addr -= APECS_DENSE_MEM;
+		APECS_SET_HAE;
+		base_and_type = APECS_SPARSE_MEM + 0x08;
+	} else {
+		addr -= APECS_IO;
+		base_and_type = APECS_IO + 0x08;
+	}
+
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= APECS_DENSE_MEM) {
+		addr -= APECS_DENSE_MEM;
+		APECS_SET_HAE;
+		base_and_type = APECS_SPARSE_MEM + 0x08;
+	} else {
+		addr -= APECS_IO;
+		base_and_type = APECS_IO + 0x08;
+	}
+
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < APECS_DENSE_MEM)
+		addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
+	return *(vuip)addr;
+}
+
+__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < APECS_DENSE_MEM)
+		addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
+	*(vuip)addr = b;
+}
+
+__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + APECS_IO);
+}
+
+__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr,
+					    unsigned long size)
+{
+	return (void __iomem *)(addr + APECS_DENSE_MEM);
+}
+
+__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
+{
+	return addr >= IDENT_ADDR + 0x180000000UL;
+}
+
+__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr >= APECS_DENSE_MEM;
+}
+
+#undef APECS_SET_HAE
+
+#undef vip
+#undef vuip
+#undef vulp
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		apecs
+#define apecs_trivial_io_bw	0
+#define apecs_trivial_io_lq	0
+#define apecs_trivial_rw_bw	2
+#define apecs_trivial_rw_lq	1
+#define apecs_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_APECS__H__ */
diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h
new file mode 100644
index 0000000..3a70d68
--- /dev/null
+++ b/include/asm-alpha/core_cia.h
@@ -0,0 +1,501 @@
+#ifndef __ALPHA_CIA__H__
+#define __ALPHA_CIA__H__
+
+/* Define to experiment with fitting everything into one 512MB HAE window.  */
+#define CIA_ONE_HAE_WINDOW 1
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * CIA is the internal name for the 21171 chipset which provides
+ * memory controller and PCI access for the 21164 chip based systems.
+ * Also supported here is the 21172 (CIA-2) and 21174 (PYXIS).
+ *
+ * The lineage is a bit confused, since the 21174 was reportedly started
+ * from the 21171 Pass 1 mask, and so is missing bug fixes that appear
+ * in 21171 Pass 2 and 21172, but it also contains additional features.
+ *
+ * This file is based on:
+ *
+ * DECchip 21171 Core Logic Chipset
+ * Technical Reference Manual
+ *
+ * EC-QE18B-TE
+ *
+ * david.rusling@reo.mts.dec.com Initial Version.
+ *
+ */
+
+/*
+ * CIA ADDRESS BIT DEFINITIONS
+ *
+ *  3333 3333 3322 2222 2222 1111 1111 11
+ *  9876 5432 1098 7654 3210 9876 5432 1098 7654 3210
+ *  ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
+ *  1                                             000
+ *  ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
+ *  |                                             |\|
+ *  |                               Byte Enable --+ |
+ *  |                             Transfer Length --+
+ *  +-- IO space, not cached
+ *
+ *   Byte      Transfer
+ *   Enable    Length    Transfer  Byte    Address
+ *   adr<6:5>  adr<4:3>  Length    Enable  Adder
+ *   ---------------------------------------------
+ *      00        00      Byte      1110   0x000
+ *      01        00      Byte      1101   0x020
+ *      10        00      Byte      1011   0x040
+ *      11        00      Byte      0111   0x060
+ *
+ *      00        01      Word      1100   0x008
+ *      01        01      Word      1001   0x028 <= Not supported in this code.
+ *      10        01      Word      0011   0x048
+ *
+ *      00        10      Tribyte   1000   0x010
+ *      01        10      Tribyte   0001   0x030
+ *
+ *      10        11      Longword  0000   0x058
+ *
+ *      Note that byte enables are asserted low.
+ *
+ */
+
+#define CIA_MEM_R1_MASK 0x1fffffff  /* SPARSE Mem region 1 mask is 29 bits */
+#define CIA_MEM_R2_MASK 0x07ffffff  /* SPARSE Mem region 2 mask is 27 bits */
+#define CIA_MEM_R3_MASK 0x03ffffff  /* SPARSE Mem region 3 mask is 26 bits */
+
+/*
+ * 21171-CA Control and Status Registers
+ */
+#define CIA_IOC_CIA_REV			(IDENT_ADDR + 0x8740000080UL)
+#  define CIA_REV_MASK			0xff
+#define CIA_IOC_PCI_LAT			(IDENT_ADDR + 0x87400000C0UL)
+#define CIA_IOC_CIA_CTRL		(IDENT_ADDR + 0x8740000100UL)
+#  define CIA_CTRL_PCI_EN		(1 << 0)
+#  define CIA_CTRL_PCI_LOCK_EN		(1 << 1)
+#  define CIA_CTRL_PCI_LOOP_EN		(1 << 2)
+#  define CIA_CTRL_FST_BB_EN		(1 << 3)
+#  define CIA_CTRL_PCI_MST_EN		(1 << 4)
+#  define CIA_CTRL_PCI_MEM_EN		(1 << 5)
+#  define CIA_CTRL_PCI_REQ64_EN		(1 << 6)
+#  define CIA_CTRL_PCI_ACK64_EN		(1 << 7)
+#  define CIA_CTRL_ADDR_PE_EN		(1 << 8)
+#  define CIA_CTRL_PERR_EN		(1 << 9)
+#  define CIA_CTRL_FILL_ERR_EN		(1 << 10)
+#  define CIA_CTRL_MCHK_ERR_EN		(1 << 11)
+#  define CIA_CTRL_ECC_CHK_EN		(1 << 12)
+#  define CIA_CTRL_ASSERT_IDLE_BC	(1 << 13)
+#  define CIA_CTRL_COM_IDLE_BC		(1 << 14)
+#  define CIA_CTRL_CSR_IOA_BYPASS	(1 << 15)
+#  define CIA_CTRL_IO_FLUSHREQ_EN	(1 << 16)
+#  define CIA_CTRL_CPU_FLUSHREQ_EN	(1 << 17)
+#  define CIA_CTRL_ARB_CPU_EN		(1 << 18)
+#  define CIA_CTRL_EN_ARB_LINK		(1 << 19)
+#  define CIA_CTRL_RD_TYPE_SHIFT	20
+#  define CIA_CTRL_RL_TYPE_SHIFT	24
+#  define CIA_CTRL_RM_TYPE_SHIFT	28
+#  define CIA_CTRL_EN_DMA_RD_PERF	(1 << 31)
+#define CIA_IOC_CIA_CNFG		(IDENT_ADDR + 0x8740000140UL)
+#  define CIA_CNFG_IOA_BWEN		(1 << 0)
+#  define CIA_CNFG_PCI_MWEN		(1 << 4)
+#  define CIA_CNFG_PCI_DWEN		(1 << 5)
+#  define CIA_CNFG_PCI_WLEN		(1 << 8)
+#define CIA_IOC_FLASH_CTRL		(IDENT_ADDR + 0x8740000200UL)
+#define CIA_IOC_HAE_MEM			(IDENT_ADDR + 0x8740000400UL)
+#define CIA_IOC_HAE_IO			(IDENT_ADDR + 0x8740000440UL)
+#define CIA_IOC_CFG			(IDENT_ADDR + 0x8740000480UL)
+#define CIA_IOC_CACK_EN			(IDENT_ADDR + 0x8740000600UL)
+#  define CIA_CACK_EN_LOCK_EN		(1 << 0)
+#  define CIA_CACK_EN_MB_EN		(1 << 1)
+#  define CIA_CACK_EN_SET_DIRTY_EN	(1 << 2)
+#  define CIA_CACK_EN_BC_VICTIM_EN	(1 << 3)
+
+
+/*
+ * 21171-CA Diagnostic Registers
+ */
+#define CIA_IOC_CIA_DIAG		(IDENT_ADDR + 0x8740002000UL)
+#define CIA_IOC_DIAG_CHECK		(IDENT_ADDR + 0x8740003000UL)
+
+/*
+ * 21171-CA Performance Monitor registers
+ */
+#define CIA_IOC_PERF_MONITOR		(IDENT_ADDR + 0x8740004000UL)
+#define CIA_IOC_PERF_CONTROL		(IDENT_ADDR + 0x8740004040UL)
+
+/*
+ * 21171-CA Error registers
+ */
+#define CIA_IOC_CPU_ERR0		(IDENT_ADDR + 0x8740008000UL)
+#define CIA_IOC_CPU_ERR1		(IDENT_ADDR + 0x8740008040UL)
+#define CIA_IOC_CIA_ERR			(IDENT_ADDR + 0x8740008200UL)
+#  define CIA_ERR_COR_ERR		(1 << 0)
+#  define CIA_ERR_UN_COR_ERR		(1 << 1)
+#  define CIA_ERR_CPU_PE		(1 << 2)
+#  define CIA_ERR_MEM_NEM		(1 << 3)
+#  define CIA_ERR_PCI_SERR		(1 << 4)
+#  define CIA_ERR_PERR			(1 << 5)
+#  define CIA_ERR_PCI_ADDR_PE		(1 << 6)
+#  define CIA_ERR_RCVD_MAS_ABT		(1 << 7)
+#  define CIA_ERR_RCVD_TAR_ABT		(1 << 8)
+#  define CIA_ERR_PA_PTE_INV		(1 << 9)
+#  define CIA_ERR_FROM_WRT_ERR		(1 << 10)
+#  define CIA_ERR_IOA_TIMEOUT		(1 << 11)
+#  define CIA_ERR_LOST_CORR_ERR		(1 << 16)
+#  define CIA_ERR_LOST_UN_CORR_ERR	(1 << 17)
+#  define CIA_ERR_LOST_CPU_PE		(1 << 18)
+#  define CIA_ERR_LOST_MEM_NEM		(1 << 19)
+#  define CIA_ERR_LOST_PERR		(1 << 21)
+#  define CIA_ERR_LOST_PCI_ADDR_PE	(1 << 22)
+#  define CIA_ERR_LOST_RCVD_MAS_ABT	(1 << 23)
+#  define CIA_ERR_LOST_RCVD_TAR_ABT	(1 << 24)
+#  define CIA_ERR_LOST_PA_PTE_INV	(1 << 25)
+#  define CIA_ERR_LOST_FROM_WRT_ERR	(1 << 26)
+#  define CIA_ERR_LOST_IOA_TIMEOUT	(1 << 27)
+#  define CIA_ERR_VALID			(1 << 31)
+#define CIA_IOC_CIA_STAT		(IDENT_ADDR + 0x8740008240UL)
+#define CIA_IOC_ERR_MASK		(IDENT_ADDR + 0x8740008280UL)
+#define CIA_IOC_CIA_SYN			(IDENT_ADDR + 0x8740008300UL)
+#define CIA_IOC_MEM_ERR0		(IDENT_ADDR + 0x8740008400UL)
+#define CIA_IOC_MEM_ERR1		(IDENT_ADDR + 0x8740008440UL)
+#define CIA_IOC_PCI_ERR0		(IDENT_ADDR + 0x8740008800UL)
+#define CIA_IOC_PCI_ERR1		(IDENT_ADDR + 0x8740008840UL)
+#define CIA_IOC_PCI_ERR3		(IDENT_ADDR + 0x8740008880UL)
+
+/*
+ * 21171-CA System configuration registers
+ */
+#define CIA_IOC_MCR			(IDENT_ADDR + 0x8750000000UL)
+#define CIA_IOC_MBA0			(IDENT_ADDR + 0x8750000600UL)
+#define CIA_IOC_MBA2			(IDENT_ADDR + 0x8750000680UL)
+#define CIA_IOC_MBA4			(IDENT_ADDR + 0x8750000700UL)
+#define CIA_IOC_MBA6			(IDENT_ADDR + 0x8750000780UL)
+#define CIA_IOC_MBA8			(IDENT_ADDR + 0x8750000800UL)
+#define CIA_IOC_MBAA			(IDENT_ADDR + 0x8750000880UL)
+#define CIA_IOC_MBAC			(IDENT_ADDR + 0x8750000900UL)
+#define CIA_IOC_MBAE			(IDENT_ADDR + 0x8750000980UL)
+#define CIA_IOC_TMG0			(IDENT_ADDR + 0x8750000B00UL)
+#define CIA_IOC_TMG1			(IDENT_ADDR + 0x8750000B40UL)
+#define CIA_IOC_TMG2			(IDENT_ADDR + 0x8750000B80UL)
+
+/*
+ * 2117A-CA PCI Address and Scatter-Gather Registers.
+ */
+#define CIA_IOC_PCI_TBIA		(IDENT_ADDR + 0x8760000100UL)
+
+#define CIA_IOC_PCI_W0_BASE		(IDENT_ADDR + 0x8760000400UL)
+#define CIA_IOC_PCI_W0_MASK		(IDENT_ADDR + 0x8760000440UL)
+#define CIA_IOC_PCI_T0_BASE		(IDENT_ADDR + 0x8760000480UL)
+
+#define CIA_IOC_PCI_W1_BASE		(IDENT_ADDR + 0x8760000500UL)
+#define CIA_IOC_PCI_W1_MASK		(IDENT_ADDR + 0x8760000540UL)
+#define CIA_IOC_PCI_T1_BASE		(IDENT_ADDR + 0x8760000580UL)
+
+#define CIA_IOC_PCI_W2_BASE		(IDENT_ADDR + 0x8760000600UL)
+#define CIA_IOC_PCI_W2_MASK		(IDENT_ADDR + 0x8760000640UL)
+#define CIA_IOC_PCI_T2_BASE		(IDENT_ADDR + 0x8760000680UL)
+
+#define CIA_IOC_PCI_W3_BASE		(IDENT_ADDR + 0x8760000700UL)
+#define CIA_IOC_PCI_W3_MASK		(IDENT_ADDR + 0x8760000740UL)
+#define CIA_IOC_PCI_T3_BASE		(IDENT_ADDR + 0x8760000780UL)
+
+#define CIA_IOC_PCI_Wn_BASE(N)	(IDENT_ADDR + 0x8760000400UL + (N)*0x100) 
+#define CIA_IOC_PCI_Wn_MASK(N)	(IDENT_ADDR + 0x8760000440UL + (N)*0x100) 
+#define CIA_IOC_PCI_Tn_BASE(N)	(IDENT_ADDR + 0x8760000480UL + (N)*0x100) 
+
+#define CIA_IOC_PCI_W_DAC		(IDENT_ADDR + 0x87600007C0UL)
+
+/*
+ * 2117A-CA Address Translation Registers.
+ */
+
+/* 8 tag registers, the first 4 of which are lockable.  */
+#define CIA_IOC_TB_TAGn(n) \
+	(IDENT_ADDR + 0x8760000800UL + (n)*0x40)
+
+/* 4 page registers per tag register.  */
+#define CIA_IOC_TBn_PAGEm(n,m) \
+	(IDENT_ADDR + 0x8760001000UL + (n)*0x100 + (m)*0x40)
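+
+/*
+ * Editorial sketch, not part of the original header: the two macros
+ * above address the scatter-gather TLB directly, so e.g. blasting all
+ * eight tags looks roughly like this (any needed barriers and the
+ * PCI_TBIA flush are omitted; the _sketch name is hypothetical).
+ */
+static inline void cia_tb_tags_clear_sketch(void)
+{
+	int i;
+	for (i = 0; i < 8; i++)
+		*(volatile int *)CIA_IOC_TB_TAGn(i) = 0;
+}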
+
+/*
+ * Memory spaces:
+ */
+#define CIA_IACK_SC			(IDENT_ADDR + 0x8720000000UL)
+#define CIA_CONF			(IDENT_ADDR + 0x8700000000UL)
+#define CIA_IO				(IDENT_ADDR + 0x8580000000UL)
+#define CIA_SPARSE_MEM			(IDENT_ADDR + 0x8000000000UL)
+#define CIA_SPARSE_MEM_R2		(IDENT_ADDR + 0x8400000000UL)
+#define CIA_SPARSE_MEM_R3		(IDENT_ADDR + 0x8500000000UL)
+#define CIA_DENSE_MEM		        (IDENT_ADDR + 0x8600000000UL)
+#define CIA_BW_MEM			(IDENT_ADDR + 0x8800000000UL)
+#define CIA_BW_IO			(IDENT_ADDR + 0x8900000000UL)
+#define CIA_BW_CFG_0			(IDENT_ADDR + 0x8a00000000UL)
+#define CIA_BW_CFG_1			(IDENT_ADDR + 0x8b00000000UL)
+
+/*
+ * ALCOR's GRU ASIC registers
+ */
+#define GRU_INT_REQ			(IDENT_ADDR + 0x8780000000UL)
+#define GRU_INT_MASK			(IDENT_ADDR + 0x8780000040UL)
+#define GRU_INT_EDGE			(IDENT_ADDR + 0x8780000080UL)
+#define GRU_INT_HILO			(IDENT_ADDR + 0x87800000C0UL)
+#define GRU_INT_CLEAR			(IDENT_ADDR + 0x8780000100UL)
+
+#define GRU_CACHE_CNFG			(IDENT_ADDR + 0x8780000200UL)
+#define GRU_SCR				(IDENT_ADDR + 0x8780000300UL)
+#define GRU_LED				(IDENT_ADDR + 0x8780000800UL)
+#define GRU_RESET			(IDENT_ADDR + 0x8780000900UL)
+
+#define ALCOR_GRU_INT_REQ_BITS		0x800fffffUL
+#define XLT_GRU_INT_REQ_BITS		0x80003fffUL
+#define GRU_INT_REQ_BITS		(alpha_mv.sys.cia.gru_int_req_bits+0)
+
+/*
+ * PYXIS interrupt control registers
+ */
+#define PYXIS_INT_REQ			(IDENT_ADDR + 0x87A0000000UL)
+#define PYXIS_INT_MASK			(IDENT_ADDR + 0x87A0000040UL)
+#define PYXIS_INT_HILO			(IDENT_ADDR + 0x87A00000C0UL)
+#define PYXIS_INT_ROUTE			(IDENT_ADDR + 0x87A0000140UL)
+#define PYXIS_GPO			(IDENT_ADDR + 0x87A0000180UL)
+#define PYXIS_INT_CNFG			(IDENT_ADDR + 0x87A00001C0UL)
+#define PYXIS_RT_COUNT			(IDENT_ADDR + 0x87A0000200UL)
+#define PYXIS_INT_TIME			(IDENT_ADDR + 0x87A0000240UL)
+#define PYXIS_IIC_CTRL			(IDENT_ADDR + 0x87A00002C0UL)
+#define PYXIS_RESET			(IDENT_ADDR + 0x8780000900UL)
+
+/* Offset between ram physical addresses and pci64 DAC bus addresses.  */
+#define PYXIS_DAC_OFFSET		(1UL << 40)
+
+/*
+ * Data structure for handling CIA machine checks.
+ */
+
+/* System-specific info.  */
+struct el_CIA_sysdata_mcheck {
+	unsigned long	cpu_err0;
+	unsigned long	cpu_err1;
+	unsigned long	cia_err;
+	unsigned long	cia_stat;
+	unsigned long	err_mask;
+	unsigned long	cia_syn;
+	unsigned long	mem_err0;
+	unsigned long	mem_err1;
+	unsigned long	pci_err0;
+	unsigned long	pci_err1;
+	unsigned long	pci_err2;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+/* Do not touch, this should *NOT* be static inline */
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
+ * series of processors) uses a sparse address mapping scheme to
+ * get at PCI memory and I/O.
+ */
+
+/*
+ * Memory functions.  64-bit and 32-bit accesses are done through
+ * dense memory space, everything else through sparse space.
+ *
+ * For reading and writing 8 and 16 bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1
+ * which gives 1Gbyte of accessible space; this relates exactly
+ * to the amount of PCI memory mapped *into* system address space.
+ * See p 6-17 of the specification, but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ *          3         2         1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3         2         1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
+
+#define vip	volatile int __force *
+#define vuip	volatile unsigned int __force *
+#define vulp	volatile unsigned long __force *
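+
+/*
+ * Editorial sketch, not part of the original header: how the routines
+ * below assemble a sparse-space address.  "len_adder" is the address
+ * adder for the desired transfer length (0x00 byte, 0x08 word, 0x18
+ * longword, as used below); the _sketch name is hypothetical.
+ */
+static inline unsigned long cia_sparse_addr_sketch(unsigned long addr,
+						   unsigned long len_adder)
+{
+	unsigned long base = (addr >= CIA_DENSE_MEM ? CIA_SPARSE_MEM : CIA_IO);
+	return ((addr & CIA_MEM_R1_MASK) << 5) + base + len_adder;
+}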
+
+__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= CIA_DENSE_MEM)
+		base_and_type = CIA_SPARSE_MEM + 0x00;
+	else
+		base_and_type = CIA_IO + 0x00;
+
+	/* We can use CIA_MEM_R1_MASK for io ports too, since it is large
+	   enough to cover all io ports, and smaller than CIA_IO.  */
+	addr &= CIA_MEM_R1_MASK;
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= CIA_DENSE_MEM)
+		base_and_type = CIA_SPARSE_MEM + 0x00;
+	else
+		base_and_type = CIA_IO + 0x00;
+
+	addr &= CIA_MEM_R1_MASK;
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= CIA_DENSE_MEM)
+		base_and_type = CIA_SPARSE_MEM + 0x08;
+	else
+		base_and_type = CIA_IO + 0x08;
+
+	addr &= CIA_MEM_R1_MASK;
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= CIA_DENSE_MEM)
+		base_and_type = CIA_SPARSE_MEM + 0x08;
+	else
+		base_and_type = CIA_IO + 0x08;
+
+	addr &= CIA_MEM_R1_MASK;
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < CIA_DENSE_MEM)
+		addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+	return *(vuip)addr;
+}
+
+__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < CIA_DENSE_MEM)
+		addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+	*(vuip)addr = b;
+}
+
+__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + CIA_IO);
+}
+
+__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr,
+					  unsigned long size)
+{
+	return (void __iomem *)(addr + CIA_DENSE_MEM);
+}
+
+__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
+{
+	return addr >= IDENT_ADDR + 0x8000000000UL;
+}
+
+__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr >= CIA_DENSE_MEM;
+}
+
+__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + CIA_BW_IO);
+}
+
+__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr,
+					      unsigned long size)
+{
+	return (void __iomem *)(addr + CIA_BW_MEM);
+}
+
+__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr)
+{
+	return addr >= IDENT_ADDR + 0x8000000000UL;
+}
+
+__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr < CIA_BW_IO;
+}
+
+#undef vip
+#undef vuip
+#undef vulp
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		cia
+#define cia_trivial_rw_bw	2
+#define cia_trivial_rw_lq	1
+#define cia_trivial_io_bw	0
+#define cia_trivial_io_lq	0
+#define cia_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		cia_bwx
+#define cia_bwx_trivial_rw_bw	1
+#define cia_bwx_trivial_rw_lq	1
+#define cia_bwx_trivial_io_bw	1
+#define cia_bwx_trivial_io_lq	1
+#define cia_bwx_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#undef __IO_PREFIX
+#ifdef CONFIG_ALPHA_PYXIS
+#define __IO_PREFIX		cia_bwx
+#else
+#define __IO_PREFIX		cia
+#endif
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_CIA__H__ */
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
new file mode 100644
index 0000000..24b2db5
--- /dev/null
+++ b/include/asm-alpha/core_irongate.h
@@ -0,0 +1,232 @@
+#ifndef __ALPHA_IRONGATE__H__
+#define __ALPHA_IRONGATE__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * IRONGATE is the internal name for the AMD-751 K7 core logic chipset
+ * which provides memory controller and PCI access for NAUTILUS-based
+ * EV6 (21264) systems.
+ *
+ * This file is based on:
+ *
+ * IronGate management library, (c) 1999 Alpha Processor, Inc.
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ *	(David Daniel, Stig Telfer, Soohoon Lee)
+ */
+
+/*
+ * The 21264 supports, and internally recognizes, a 44-bit physical
+ * address space that is divided equally between memory address space
+ * and I/O address space. Memory address space resides in the lower
+ * half of the physical address space (PA[43]=0) and I/O address space
+ * resides in the upper half of the physical address space (PA[43]=1).
+ */
+
+/*
+ * Irongate CSR map.  Some of the CSRs are 8 or 16 bits, but all access
+ * through the routines given is 32-bit.
+ *
+ * The first 0x40 bytes are standard as per the PCI spec.
+ */
+
+typedef volatile __u32	igcsr32;
+
+typedef struct {
+	igcsr32 dev_vendor;		/* 0x00 - device ID, vendor ID */
+	igcsr32 stat_cmd;		/* 0x04 - status, command */
+	igcsr32 class;			/* 0x08 - class code, rev ID */
+	igcsr32 latency;		/* 0x0C - header type, PCI latency */
+	igcsr32 bar0;			/* 0x10 - BAR0 - AGP */
+	igcsr32 bar1;			/* 0x14 - BAR1 - GART */
+	igcsr32 bar2;			/* 0x18 - Power Management reg block */
+
+	igcsr32 rsrvd0[6];		/* 0x1C-0x33 reserved */
+
+	igcsr32 capptr;			/* 0x34 - Capabilities pointer */
+
+	igcsr32 rsrvd1[2];		/* 0x38-0x3F reserved */
+
+	igcsr32 bacsr10;		/* 0x40 - base address chip selects */
+	igcsr32 bacsr32;		/* 0x44 - base address chip selects */
+	igcsr32 bacsr54_eccms761;	/* 0x48 - 751: base addr. chip selects
+						  761: ECC, mode/status */
+
+	igcsr32 rsrvd2[1];		/* 0x4C-0x4F reserved */
+
+	igcsr32 drammap;		/* 0x50 - address mapping control */
+	igcsr32 dramtm;			/* 0x54 - timing, driver strength */
+	igcsr32 dramms;			/* 0x58 - DRAM mode/status */
+
+	igcsr32 rsrvd3[1];		/* 0x5C-0x5F reserved */
+
+	igcsr32 biu0;			/* 0x60 - bus interface unit */
+	igcsr32 biusip;			/* 0x64 - Serial initialisation pkt */
+
+	igcsr32 rsrvd4[2];		/* 0x68-0x6F reserved */
+
+	igcsr32 mro;			/* 0x70 - memory request optimiser */
+
+	igcsr32 rsrvd5[3];		/* 0x74-0x7F reserved */
+
+	igcsr32 whami;			/* 0x80 - who am I */
+	igcsr32 pciarb;			/* 0x84 - PCI arbitration control */
+	igcsr32 pcicfg;			/* 0x88 - PCI config status */
+
+	igcsr32 rsrvd6[4];		/* 0x8C-0x9B reserved */
+
+	igcsr32 pci_mem;		/* 0x9C - PCI top of memory,
+						  761 only */
+
+	/* AGP (bus 1) control registers */
+	igcsr32 agpcap;			/* 0xA0 - AGP Capability Identifier */
+	igcsr32 agpstat;		/* 0xA4 - AGP status register */
+	igcsr32 agpcmd;			/* 0xA8 - AGP control register */
+	igcsr32 agpva;			/* 0xAC - AGP Virtual Address Space */
+	igcsr32 agpmode;		/* 0xB0 - AGP/GART mode control */
+} Irongate0;
+
+
+typedef struct {
+
+	igcsr32 dev_vendor;		/* 0x00 - Device and Vendor IDs */
+	igcsr32 stat_cmd;		/* 0x04 - Status and Command regs */
+	igcsr32 class;			/* 0x08 - subclass, baseclass etc */
+	igcsr32 htype;			/* 0x0C - header type (at 0x0E) */
+	igcsr32 rsrvd0[2];		/* 0x10-0x17 reserved */
+	igcsr32 busnos;			/* 0x18 - Primary, secondary bus nos */
+	igcsr32 io_baselim_regs;	/* 0x1C - IO base, IO lim, AGP status */
+	igcsr32	mem_baselim;		/* 0x20 - memory base, memory lim */
+	igcsr32 pfmem_baselim;		/* 0x24 - prefetchable base, lim */
+	igcsr32 rsrvd1[2];		/* 0x28-0x2F reserved */
+	igcsr32 io_baselim;		/* 0x30 - IO base, IO limit */
+	igcsr32 rsrvd2[2];		/* 0x34-0x3B - reserved */
+	igcsr32 interrupt;		/* 0x3C - interrupt, PCI bridge ctrl */
+
+} Irongate1;
+
+extern igcsr32 *IronECC;
+
+/*
+ * Memory spaces:
+ */
+
+/* Irongate is consistent with a subset of the Tsunami memory map */
+#ifdef USE_48_BIT_KSEG
+#define IRONGATE_BIAS 0x80000000000UL
+#else
+#define IRONGATE_BIAS 0x10000000000UL
+#endif
+
+
+#define IRONGATE_MEM		(IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL)
+#define IRONGATE_IACK_SC	(IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL)
+#define IRONGATE_IO		(IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
+#define IRONGATE_CONF		(IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
+
+/*
+ * PCI Configuration space accesses are formed like so:
+ *
+ * 0x1FE << 24 |  : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 :
+ *                : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 :
+ *                  ---bus number--   -device-- -fun-   ---register----
+ */
+
+#define IGCSR(dev,fun,reg)	( IRONGATE_CONF | \
+				((dev)<<11) | \
+				((fun)<<8) | \
+				(reg) )
+
+#define IRONGATE0		((Irongate0 *) IGCSR(0, 0, 0))
+#define IRONGATE1		((Irongate1 *) IGCSR(1, 0, 0))
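+
+/*
+ * Editorial sketch, not part of the original header: IGCSR makes the
+ * north bridge's config space addressable as a structure, so reading
+ * e.g. the class code / revision ID word is an ordinary 32-bit load
+ * (the _sketch name is hypothetical).
+ */
+static inline __u32 irongate_read_class_sketch(void)
+{
+	return IRONGATE0->class;	/* 0x08 - class code, rev ID */
+}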
+
+/*
+ * Data structure for handling IRONGATE machine checks:
+ * This is the standard OSF logout frame
+ */
+
+#define SCB_Q_SYSERR	0x620			/* OSF definitions */
+#define SCB_Q_PROCERR	0x630
+#define SCB_Q_SYSMCHK	0x660
+#define SCB_Q_PROCMCHK	0x670
+
+struct el_IRONGATE_sysdata_mcheck {
+	__u32 FrameSize;                 /* Bytes, including this field */
+	__u32 FrameFlags;                /* <31> = Retry, <30> = Second Error */
+	__u32 CpuOffset;                 /* Offset to CPU-specific info */
+	__u32 SystemOffset;              /* Offset to system-specific info */
+	__u32 MCHK_Code;
+	__u32 MCHK_Frame_Rev;
+	__u64 I_STAT;
+	__u64 DC_STAT;
+	__u64 C_ADDR;
+	__u64 DC1_SYNDROME;
+	__u64 DC0_SYNDROME;
+	__u64 C_STAT;
+	__u64 C_STS;
+	__u64 RESERVED0;
+	__u64 EXC_ADDR;
+	__u64 IER_CM;
+	__u64 ISUM;
+	__u64 MM_STAT;
+	__u64 PAL_BASE;
+	__u64 I_CTL;
+	__u64 PCTX;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
+ * K7 can only use linear accesses to get at PCI memory and I/O spaces.
+ */
+
+/*
+ * Memory functions.  All accesses are done through linear space.
+ */
+
+__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + IRONGATE_IO);
+}
+
+extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size);
+extern void irongate_iounmap(volatile void __iomem *addr);
+
+__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
+{
+	return addr >= IRONGATE_MEM;
+}
+
+__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr;
+	return addr < IRONGATE_IO || addr >= IRONGATE_CONF;
+}
+
+#undef __IO_PREFIX
+#define __IO_PREFIX			irongate
+#define irongate_trivial_rw_bw		1
+#define irongate_trivial_rw_lq		1
+#define irongate_trivial_io_bw		1
+#define irongate_trivial_io_lq		1
+#define irongate_trivial_iounmap	0
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_IRONGATE__H__ */
diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h
new file mode 100644
index 0000000..f7cb4b4
--- /dev/null
+++ b/include/asm-alpha/core_lca.h
@@ -0,0 +1,361 @@
+#ifndef __ALPHA_LCA__H__
+#define __ALPHA_LCA__H__
+
+#include <asm/system.h>
+#include <asm/compiler.h>
+
+/*
+ * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
+ * for example).
+ *
+ * This file is based on:
+ *
+ *	DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
+ *	Hardware Reference Manual; Digital Equipment Corp.; May 1994;
+ *	Maynard, MA; Order Number: EC-N2681-71.
+ */
+
+/*
+ * NOTE: The LCA uses a Host Address Extension (HAE) register to access
+ *	 PCI addresses that are beyond the first 27 bits of address
+ *	 space.  Updating the HAE requires an external cycle (and
+ *	 a memory barrier), which tends to be slow.  Instead of updating
+ *	 it on each sparse memory access, we keep the current HAE value
+ *	 cached in the variable cache_hae.  Only if the cached HAE differs
+ *	 from the desired HAE value do we actually update the HAE register.
+ *	 The HAE register is preserved by the interrupt handler entry/exit
+ *	 code, so this scheme works even in the presence of interrupts.
+ *
+ * Dense memory space doesn't require the HAE, but is restricted to
+ * aligned 32 and 64 bit accesses.  Special Cycle and Interrupt
+ * Acknowledge cycles may also require the use of the HAE.  The LCA
+ * limits I/O address space to the bottom 24 bits of address space,
+ * but this easily covers the 16 bit ISA I/O address space.
+ */
+
+/*
+ * NOTE 2! The memory operations do not set any memory barriers, as
+ * they're not needed for cases like a frame buffer that is essentially
+ * memory-like.  You need to do them by hand if the operations depend
+ * on ordering.
+ *
+ * Similarly, the port I/O operations do a "mb" only after a write
+ * operation: if an mb is needed before (as in the case of doing
+ * memory mapped I/O first, and then a port I/O operation to the same
+ * device), it needs to be done by hand.
+ *
+ * After the above has bitten me 100 times, I'll give up and just do
+ * the mb all the time, but right now I'm hoping this will work out.
+ * Avoiding mb's may potentially be a noticeable speed improvement,
+ * but I can't honestly say I've tested it.
+ *
+ * Handling interrupts that need to do mb's to synchronize to
+ * non-interrupts is another fun race area.  Don't do it (because if
+ * you do, I'll have to do *everything* with interrupts disabled,
+ * ugh).
+ */
+
+/*
+ * Memory Controller registers:
+ */
+#define LCA_MEM_BCR0		(IDENT_ADDR + 0x120000000UL)
+#define LCA_MEM_BCR1		(IDENT_ADDR + 0x120000008UL)
+#define LCA_MEM_BCR2		(IDENT_ADDR + 0x120000010UL)
+#define LCA_MEM_BCR3		(IDENT_ADDR + 0x120000018UL)
+#define LCA_MEM_BMR0		(IDENT_ADDR + 0x120000020UL)
+#define LCA_MEM_BMR1		(IDENT_ADDR + 0x120000028UL)
+#define LCA_MEM_BMR2		(IDENT_ADDR + 0x120000030UL)
+#define LCA_MEM_BMR3		(IDENT_ADDR + 0x120000038UL)
+#define LCA_MEM_BTR0		(IDENT_ADDR + 0x120000040UL)
+#define LCA_MEM_BTR1		(IDENT_ADDR + 0x120000048UL)
+#define LCA_MEM_BTR2		(IDENT_ADDR + 0x120000050UL)
+#define LCA_MEM_BTR3		(IDENT_ADDR + 0x120000058UL)
+#define LCA_MEM_GTR		(IDENT_ADDR + 0x120000060UL)
+#define LCA_MEM_ESR		(IDENT_ADDR + 0x120000068UL)
+#define LCA_MEM_EAR		(IDENT_ADDR + 0x120000070UL)
+#define LCA_MEM_CAR		(IDENT_ADDR + 0x120000078UL)
+#define LCA_MEM_VGR		(IDENT_ADDR + 0x120000080UL)
+#define LCA_MEM_PLM		(IDENT_ADDR + 0x120000088UL)
+#define LCA_MEM_FOR		(IDENT_ADDR + 0x120000090UL)
+
+/*
+ * I/O Controller registers:
+ */
+#define LCA_IOC_HAE		(IDENT_ADDR + 0x180000000UL)
+#define LCA_IOC_CONF		(IDENT_ADDR + 0x180000020UL)
+#define LCA_IOC_STAT0		(IDENT_ADDR + 0x180000040UL)
+#define LCA_IOC_STAT1		(IDENT_ADDR + 0x180000060UL)
+#define LCA_IOC_TBIA		(IDENT_ADDR + 0x180000080UL)
+#define LCA_IOC_TB_ENA		(IDENT_ADDR + 0x1800000a0UL)
+#define LCA_IOC_SFT_RST		(IDENT_ADDR + 0x1800000c0UL)
+#define LCA_IOC_PAR_DIS		(IDENT_ADDR + 0x1800000e0UL)
+#define LCA_IOC_W_BASE0		(IDENT_ADDR + 0x180000100UL)
+#define LCA_IOC_W_BASE1		(IDENT_ADDR + 0x180000120UL)
+#define LCA_IOC_W_MASK0		(IDENT_ADDR + 0x180000140UL)
+#define LCA_IOC_W_MASK1		(IDENT_ADDR + 0x180000160UL)
+#define LCA_IOC_T_BASE0		(IDENT_ADDR + 0x180000180UL)
+#define LCA_IOC_T_BASE1		(IDENT_ADDR + 0x1800001a0UL)
+#define LCA_IOC_TB_TAG0		(IDENT_ADDR + 0x188000000UL)
+#define LCA_IOC_TB_TAG1		(IDENT_ADDR + 0x188000020UL)
+#define LCA_IOC_TB_TAG2		(IDENT_ADDR + 0x188000040UL)
+#define LCA_IOC_TB_TAG3		(IDENT_ADDR + 0x188000060UL)
+#define LCA_IOC_TB_TAG4		(IDENT_ADDR + 0x188000070UL)
+#define LCA_IOC_TB_TAG5		(IDENT_ADDR + 0x1880000a0UL)
+#define LCA_IOC_TB_TAG6		(IDENT_ADDR + 0x1880000c0UL)
+#define LCA_IOC_TB_TAG7		(IDENT_ADDR + 0x1880000e0UL)
+
+/*
+ * Memory spaces:
+ */
+#define LCA_IACK_SC		(IDENT_ADDR + 0x1a0000000UL)
+#define LCA_CONF		(IDENT_ADDR + 0x1e0000000UL)
+#define LCA_IO			(IDENT_ADDR + 0x1c0000000UL)
+#define LCA_SPARSE_MEM		(IDENT_ADDR + 0x200000000UL)
+#define LCA_DENSE_MEM		(IDENT_ADDR + 0x300000000UL)
+
+/*
+ * Bit definitions for I/O Controller status register 0:
+ */
+#define LCA_IOC_STAT0_CMD		0xf
+#define LCA_IOC_STAT0_ERR		(1<<4)
+#define LCA_IOC_STAT0_LOST		(1<<5)
+#define LCA_IOC_STAT0_THIT		(1<<6)
+#define LCA_IOC_STAT0_TREF		(1<<7)
+#define LCA_IOC_STAT0_CODE_SHIFT	8
+#define LCA_IOC_STAT0_CODE_MASK		0x7
+#define LCA_IOC_STAT0_P_NBR_SHIFT	13
+#define LCA_IOC_STAT0_P_NBR_MASK	0x7ffff
+
+#define LCA_HAE_ADDRESS		LCA_IOC_HAE
+
+/* LCA PMR Power Management register defines */
+#define LCA_PMR_ADDR	(IDENT_ADDR + 0x120000098UL)
+#define LCA_PMR_PDIV    0x7                     /* Primary clock divisor */
+#define LCA_PMR_ODIV    0x38                    /* Override clock divisor */
+#define LCA_PMR_INTO    0x40                    /* Interrupt override */
+#define LCA_PMR_DMAO    0x80                    /* DMA override */
+#define LCA_PMR_OCCEB   0xffff0000L             /* Override cycle counter - even bits */
+#define LCA_PMR_OCCOB   0xffff000000000000L     /* Override cycle counter - odd bits */
+#define LCA_PMR_PRIMARY_MASK    0xfffffffffffffff8L
+
+/* LCA PMR Macros */
+
+#define LCA_READ_PMR        (*(volatile unsigned long *)LCA_PMR_ADDR)
+#define LCA_WRITE_PMR(d)    (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
+
+#define LCA_GET_PRIMARY(r)  ((r) & LCA_PMR_PDIV)
+#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
+#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
+
+/* LCA PMR Divisor values */
+#define LCA_PMR_DIV_1   0x0
+#define LCA_PMR_DIV_1_5 0x1
+#define LCA_PMR_DIV_2   0x2
+#define LCA_PMR_DIV_4   0x3
+#define LCA_PMR_DIV_8   0x4
+#define LCA_PMR_DIV_16  0x5
+#define LCA_PMR_DIV_MIN LCA_PMR_DIV_1
+#define LCA_PMR_DIV_MAX LCA_PMR_DIV_16
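+
+/*
+ * Editorial sketch, not part of the original header: throttling the
+ * core clock with the PMR macros above, e.g. down to divide-by-2.
+ * (The memory barrier needed after the write is omitted here; the
+ * _sketch name is hypothetical.)
+ */
+static inline void lca_set_primary_div2_sketch(void)
+{
+	unsigned long pmr = LCA_READ_PMR;
+	LCA_SET_PRIMARY_CLOCK(pmr, LCA_PMR_DIV_2);
+	LCA_WRITE_PMR(pmr);
+}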
+
+
+/*
+ * Data structure for handling LCA machine checks.  Correctable errors
+ * result in a short logout frame, uncorrectable ones in a long one.
+ */
+struct el_lca_mcheck_short {
+	struct el_common	h;		/* common logout header */
+	unsigned long		esr;		/* error-status register */
+	unsigned long		ear;		/* error-address register */
+	unsigned long		dc_stat;	/* dcache status register */
+	unsigned long		ioc_stat0;	/* I/O controller status register 0 */
+	unsigned long		ioc_stat1;	/* I/O controller status register 1 */
+};
+
+struct el_lca_mcheck_long {
+	struct el_common	h;		/* common logout header */
+	unsigned long		pt[31];		/* PAL temps */
+	unsigned long		exc_addr;	/* exception address */
+	unsigned long		pad1[3];
+	unsigned long		pal_base;	/* PALcode base address */
+	unsigned long		hier;		/* hw interrupt enable */
+	unsigned long		hirr;		/* hw interrupt request */
+	unsigned long		mm_csr;		/* MMU control & status */
+	unsigned long		dc_stat;	/* data cache status */
+	unsigned long		dc_addr;	/* data cache addr register */
+	unsigned long		abox_ctl;	/* address box control register */
+	unsigned long		esr;		/* error status register */
+	unsigned long		ear;		/* error address register */
+	unsigned long		car;		/* cache control register */
+	unsigned long		ioc_stat0;	/* I/O controller status register 0 */
+	unsigned long		ioc_stat1;	/* I/O controller status register 1 */
+	unsigned long		va;		/* virtual address register */
+};
+
+union el_lca {
+	struct el_common *		c;
+	struct el_lca_mcheck_long *	l;
+	struct el_lca_mcheck_short *	s;
+};
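+
+/*
+ * Editorial sketch, not part of the original header: a machine-check
+ * handler can tell the two frame layouts apart by the size recorded in
+ * the common logout header (the _sketch name is hypothetical).
+ */
+static inline int lca_mcheck_is_long_sketch(union el_lca el)
+{
+	return el.c->size > sizeof(struct el_lca_mcheck_short);
+}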
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * Unlike Jensen, the Noname machines have no concept of local
+ * I/O---everything goes over the PCI bus.
+ *
+ * There is plenty of room for optimization here.  In particular,
+ * the Alpha's insb/insw/extb/extw should be useful in moving
+ * data to/from the right byte-lanes.
+ */
+
+#define vip	volatile int __force *
+#define vuip	volatile unsigned int __force *
+#define vulp	volatile unsigned long __force *
+
+#define LCA_SET_HAE						\
+	do {							\
+		if (addr >= (1UL << 24)) {			\
+			unsigned long msb = addr & 0xf8000000;	\
+			addr -= msb;				\
+			set_hae(msb);				\
+		}						\
+	} while (0)
+
+
+__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= LCA_DENSE_MEM) {
+		addr -= LCA_DENSE_MEM;
+		LCA_SET_HAE;
+		base_and_type = LCA_SPARSE_MEM + 0x00;
+	} else {
+		addr -= LCA_IO;
+		base_and_type = LCA_IO + 0x00;
+	}
+
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= LCA_DENSE_MEM) {
+		addr -= LCA_DENSE_MEM;
+		LCA_SET_HAE;
+		base_and_type = LCA_SPARSE_MEM + 0x00;
+	} else {
+		addr -= LCA_IO;
+		base_and_type = LCA_IO + 0x00;
+	}
+
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, base_and_type;
+
+	if (addr >= LCA_DENSE_MEM) {
+		addr -= LCA_DENSE_MEM;
+		LCA_SET_HAE;
+		base_and_type = LCA_SPARSE_MEM + 0x08;
+	} else {
+		addr -= LCA_IO;
+		base_and_type = LCA_IO + 0x08;
+	}
+
+	result = *(vip) ((addr << 5) + base_and_type);
+	return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long w, base_and_type;
+
+	if (addr >= LCA_DENSE_MEM) {
+		addr -= LCA_DENSE_MEM;
+		LCA_SET_HAE;
+		base_and_type = LCA_SPARSE_MEM + 0x08;
+	} else {
+		addr -= LCA_IO;
+		base_and_type = LCA_IO + 0x08;
+	}
+
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < LCA_DENSE_MEM)
+		addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
+	return *(vuip)addr;
+}
+
+__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr < LCA_DENSE_MEM)
+		addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
+	*(vuip)addr = b;
+}
+
+__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + LCA_IO);
+}
+
+__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
+					  unsigned long size)
+{
+	return (void __iomem *)(addr + LCA_DENSE_MEM);
+}
+
+__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
+{
+	return addr >= IDENT_ADDR + 0x120000000UL;
+}
+
+__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr >= LCA_DENSE_MEM;
+}
+
+#undef vip
+#undef vuip
+#undef vulp
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		lca
+#define lca_trivial_rw_bw	2
+#define lca_trivial_rw_lq	1
+#define lca_trivial_io_bw	0
+#define lca_trivial_io_lq	0
+#define lca_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_LCA__H__ */
diff --git a/include/asm-alpha/core_marvel.h b/include/asm-alpha/core_marvel.h
new file mode 100644
index 0000000..30d55fe
--- /dev/null
+++ b/include/asm-alpha/core_marvel.h
@@ -0,0 +1,378 @@
+/*
+ * Marvel systems use the IO7 I/O chip, which provides PCI/PCI-X/AGP access.
+ *
+ * This file is based on:
+ *
+ * Marvel / EV7 System Programmer's Manual
+ * Revision 1.00
+ * 14 May 2001
+ */
+
+#ifndef __ALPHA_MARVEL__H__
+#define __ALPHA_MARVEL__H__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <asm/compiler.h>
+
+#define MARVEL_MAX_PIDS		 32 /* as long as we rely on 43-bit superpage */
+#define MARVEL_IRQ_VEC_PE_SHIFT	(10)
+#define MARVEL_IRQ_VEC_IRQ_MASK	((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1)
+#define MARVEL_NR_IRQS		\
+	(16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT)))
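+
+/*
+ * Editorial sketch, not part of the original header: one packing of
+ * (pe, per-PE irq) into a global vector that is consistent with the
+ * constants above -- 16 legacy vectors first, then a block of
+ * 1 << MARVEL_IRQ_VEC_PE_SHIFT vectors per processor element (the
+ * _sketch name is hypothetical).
+ */
+static inline unsigned int marvel_irq_vector_sketch(unsigned int pe,
+						    unsigned int irq)
+{
+	return 16 + (pe << MARVEL_IRQ_VEC_PE_SHIFT)
+		  + (irq & MARVEL_IRQ_VEC_IRQ_MASK);
+}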
+
+/*
+ * EV7 RBOX Registers
+ */
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(16)));
+} ev7_csr;
+
+typedef struct {
+	ev7_csr	RBOX_CFG;		/* 0x0000 */
+	ev7_csr	RBOX_NSVC;
+	ev7_csr	RBOX_EWVC;
+	ev7_csr	RBOX_WHAMI;
+	ev7_csr	RBOX_TCTL;		/* 0x0040 */
+	ev7_csr	RBOX_INT;
+	ev7_csr	RBOX_IMASK;
+	ev7_csr	RBOX_IREQ;
+	ev7_csr	RBOX_INTQ;		/* 0x0080 */
+	ev7_csr	RBOX_INTA;
+	ev7_csr	RBOX_IT;
+	ev7_csr	RBOX_SCRATCH1;
+	ev7_csr	RBOX_SCRATCH2;		/* 0x00c0 */
+	ev7_csr	RBOX_L_ERR;
+} ev7_csrs;
+
+/*
+ * EV7 CSR addressing macros
+ */
+#define EV7_MASK40(addr)        ((addr) & ((1UL << 41) - 1))
+#define EV7_KERN_ADDR(addr)	((void *)(IDENT_ADDR | EV7_MASK40(addr)))
+
+#define EV7_PE_MASK		0x1ffUL /* 9 bits ( 256 + mem/io ) */
+#define EV7_IPE(pe)		((~((long)(pe)) & EV7_PE_MASK) << 35)
+
+#define EV7_CSR_PHYS(pe, off)	(EV7_IPE(pe) | (0x7FFCUL << 20) | (off))
+#define EV7_CSRS_PHYS(pe)	(EV7_CSR_PHYS(pe, 0UL))
+
+#define EV7_CSR_KERN(pe, off)	(EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off)))
+#define EV7_CSRS_KERN(pe)	(EV7_KERN_ADDR(EV7_CSRS_PHYS(pe)))
+
+#define EV7_CSR_OFFSET(name)	((unsigned long)&((ev7_csrs *)NULL)->name.csr)
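+
+/*
+ * Editorial sketch, not part of the original header: the macros above
+ * make the RBOX CSRs of processor element "pe" addressable through the
+ * typed overlay, e.g. its WHAMI register (the _sketch name is
+ * hypothetical).
+ */
+static inline volatile unsigned long *
+ev7_rbox_whami_sketch(unsigned int pe)
+{
+	return &((ev7_csrs *)EV7_CSRS_KERN(pe))->RBOX_WHAMI.csr;
+}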
+
+/*
+ * IO7 registers
+ */
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(64)));
+} io7_csr;
+
+typedef struct {
+	/* I/O Port Control Registers */
+	io7_csr	POx_CTRL;	       	/* 0x0000 */
+	io7_csr	POx_CACHE_CTL;
+	io7_csr POx_TIMER;
+	io7_csr POx_IO_ADR_EXT;
+	io7_csr	POx_MEM_ADR_EXT;	/* 0x0100 */
+	io7_csr POx_XCAL_CTRL;
+	io7_csr rsvd1[2];	/* ?? spec doesn't show 0x180 */
+	io7_csr POx_DM_SOURCE;		/* 0x0200 */
+	io7_csr POx_DM_DEST;
+	io7_csr POx_DM_SIZE;
+	io7_csr POx_DM_CTRL;
+	io7_csr rsvd2[4];		/* 0x0300 */
+
+	/* AGP Control Registers -- port 3 only */
+	io7_csr AGP_CAP_ID;		/* 0x0400 */
+	io7_csr AGP_STAT;
+	io7_csr	AGP_CMD;
+	io7_csr	rsvd3;
+
+	/* I/O Port Monitor Registers */
+	io7_csr	POx_MONCTL;		/* 0x0500 */
+	io7_csr POx_CTRA;
+	io7_csr POx_CTRB;
+	io7_csr POx_CTR56;
+	io7_csr POx_SCRATCH;		/* 0x0600 */
+	io7_csr POx_XTRA_A;
+	io7_csr POx_XTRA_TS;
+	io7_csr POx_XTRA_Z;
+	io7_csr rsvd4;			/* 0x0700 */
+	io7_csr POx_THRESHA;
+	io7_csr POx_THRESHB;
+	io7_csr rsvd5[33];
+
+	/* System Address Space Window Control Registers */
+
+	io7_csr POx_WBASE[4];		/* 0x1000 */
+	io7_csr POx_WMASK[4];
+	io7_csr POx_TBASE[4];
+	io7_csr POx_SG_TBIA;
+	io7_csr POx_MSI_WBASE;
+	io7_csr rsvd6[50];
+
+	/* I/O Port Error Registers */
+	io7_csr POx_ERR_SUM;
+	io7_csr POx_FIRST_ERR;
+	io7_csr POx_MSK_HEI;
+	io7_csr POx_TLB_ERR;
+	io7_csr POx_SPL_COMPLT;
+	io7_csr POx_TRANS_SUM;
+	io7_csr POx_FRC_PCI_ERR;
+	io7_csr POx_MULT_ERR;
+	io7_csr rsvd7[8];
+
+	/* I/O Port End of Interrupt Registers */
+	io7_csr EOI_DAT;
+	io7_csr rsvd8[7];
+	io7_csr POx_IACK_SPECIAL;
+	io7_csr rsvd9[103];
+} io7_ioport_csrs;
+
+typedef struct {
+	io7_csr IO_ASIC_REV;		/* 0x30.0000 */
+	io7_csr IO_SYS_REV;
+	io7_csr SER_CHAIN3;
+	io7_csr PO7_RST1;
+	io7_csr PO7_RST2;		/* 0x30.0100 */
+	io7_csr POx_RST[4];
+	io7_csr IO7_DWNH;
+	io7_csr IO7_MAF;
+	io7_csr IO7_MAF_TO;
+	io7_csr IO7_ACC_CLUMP;		/* 0x30.0300 */
+	io7_csr IO7_PMASK;
+	io7_csr IO7_IOMASK;
+	io7_csr IO7_UPH;
+	io7_csr IO7_UPH_TO;		/* 0x30.0400 */
+	io7_csr RBX_IREQ_OFF;
+	io7_csr RBX_INTA_OFF;
+	io7_csr INT_RTY;
+	io7_csr PO7_MONCTL;		/* 0x30.0500 */
+	io7_csr PO7_CTRA;
+	io7_csr PO7_CTRB;
+	io7_csr PO7_CTR56;
+	io7_csr PO7_SCRATCH;		/* 0x30.0600 */
+	io7_csr PO7_XTRA_A;
+	io7_csr PO7_XTRA_TS;
+	io7_csr PO7_XTRA_Z;
+	io7_csr PO7_PMASK;		/* 0x30.0700 */
+	io7_csr PO7_THRESHA;
+	io7_csr PO7_THRESHB;
+	io7_csr rsvd1[97];
+	io7_csr PO7_ERROR_SUM;		/* 0x30.2000 */
+	io7_csr PO7_BHOLE_MASK;
+	io7_csr PO7_HEI_MSK;
+	io7_csr PO7_CRD_MSK;
+	io7_csr PO7_UNCRR_SYM;		/* 0x30.2100 */
+	io7_csr PO7_CRRCT_SYM;
+	io7_csr PO7_ERR_PKT[2];
+	io7_csr PO7_UGBGE_SYM;		/* 0x30.2200 */
+	io7_csr rsbv2[887];
+	io7_csr PO7_LSI_CTL[128];	/* 0x31.0000 */
+	io7_csr rsvd3[123];
+	io7_csr HLT_CTL;		/* 0x31.3ec0 */
+	io7_csr HPI_CTL;		/* 0x31.3f00 */
+	io7_csr CRD_CTL;
+	io7_csr STV_CTL;
+	io7_csr HEI_CTL;
+	io7_csr PO7_MSI_CTL[16];	/* 0x31.4000 */
+	io7_csr rsvd4[240];
+
+	/*
+	 * Interrupt Diagnostic / Test
+	 */
+	struct {
+		io7_csr INT_PND;
+		io7_csr INT_CLR;
+		io7_csr INT_EOI;
+		io7_csr rsvd[29];
+	} INT_DIAG[4];
+	io7_csr rsvd5[125];	    	/* 0x31.a000 */
+	io7_csr MISC_PND;		/* 0x31.b800 */
+	io7_csr rsvd6[31];
+	io7_csr MSI_PND[16];		/* 0x31.c000 */
+	io7_csr rsvd7[16];
+	io7_csr MSI_CLR[16];		/* 0x31.c800 */
+} io7_port7_csrs;
+
+/* 
+ * IO7 DMA Window Base register (POx_WBASEx)
+ */
+#define wbase_m_ena  0x1
+#define wbase_m_sg   0x2
+#define wbase_m_dac  0x4
+#define wbase_m_addr 0xFFF00000
+union IO7_POx_WBASE {
+	struct {
+		unsigned ena : 1;	/* <0>			*/
+		unsigned sg : 1;	/* <1>			*/
+		unsigned dac : 1;	/* <2> -- window 3 only */
+		unsigned rsvd1 : 17; 
+		unsigned addr : 12;	/* <31:20>		*/
+		unsigned rsvd2 : 32;
+	} bits;
+	unsigned as_long[2];
+	unsigned as_quad;
+};
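+
+/*
+ * Editorial sketch, not part of the original header: composing a
+ * direct-mapped (non-scatter-gather) window base value from the mask
+ * bits above, for a window starting at a 1MB-aligned "phys_base" (the
+ * _sketch name is hypothetical).
+ */
+static inline unsigned long io7_wbase_value_sketch(unsigned long phys_base)
+{
+	return (phys_base & wbase_m_addr) | wbase_m_ena;
+}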
+
+/*
+ * IO7 IID (Interrupt IDentifier) format
+ *
+ * For level-sensitive interrupts, int_num is encoded as:
+ *
+ *	bus/port	slot/device	INTx
+ *	<7:5>		<4:2>		<1:0>
+ */
+union IO7_IID {
+	struct {
+		unsigned int_num : 9;		/* <8:0>    	*/
+		unsigned tpu_mask : 4;		/* <12:9> rsvd	*/
+		unsigned msi : 1;		/* 13		*/
+		unsigned ipe : 10;		/* <23:14>	*/
+		unsigned long rsvd : 40;		
+	} bits;
+	unsigned int as_long[2];
+	unsigned long as_quad;
+};
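+
+/*
+ * Editorial sketch, not part of the original header: decoding
+ * bus/port, slot/device and INTx from a level-sensitive int_num per
+ * the layout above (the _sketch name is hypothetical).
+ */
+static inline void io7_iid_decode_sketch(unsigned long iid,
+					 unsigned int *bus,
+					 unsigned int *dev,
+					 unsigned int *intx)
+{
+	union IO7_IID i;
+	i.as_quad = iid;
+	*bus  = (i.bits.int_num >> 5) & 0x7;	/* <7:5> */
+	*dev  = (i.bits.int_num >> 2) & 0x7;	/* <4:2> */
+	*intx =  i.bits.int_num       & 0x3;	/* <1:0> */
+}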
+
+/*
+ * IO7 addressing macros
+ */
+#define IO7_KERN_ADDR(addr)	(EV7_KERN_ADDR(addr))
+
+#define IO7_PORT_MASK	   	0x07UL	/* 3 bits of port 	   */
+
+#define IO7_IPE(pe)		(EV7_IPE(pe))
+#define IO7_IPORT(port)		((~((long)(port)) & IO7_PORT_MASK) << 32)
+
+#define IO7_HOSE(pe, port)	(IO7_IPE(pe) | IO7_IPORT(port))
+
+#define IO7_MEM_PHYS(pe, port)	(IO7_HOSE(pe, port) | 0x00000000UL)
+#define IO7_CONF_PHYS(pe, port)	(IO7_HOSE(pe, port) | 0xFE000000UL)
+#define IO7_IO_PHYS(pe, port)	(IO7_HOSE(pe, port) | 0xFF000000UL)
+#define IO7_CSR_PHYS(pe, port, off) \
+                                (IO7_HOSE(pe, port) | 0xFF800000UL | (off))
+#define IO7_CSRS_PHYS(pe, port)	(IO7_CSR_PHYS(pe, port, 0UL))
+#define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL))
+
+#define IO7_MEM_KERN(pe, port)      (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port)))
+#define IO7_CONF_KERN(pe, port)     (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port)))
+#define IO7_IO_KERN(pe, port)       (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port)))
+#define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off)))
+#define IO7_CSRS_KERN(pe, port)     (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port)))
+#define IO7_PORT7_CSRS_KERN(pe)	    (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe)))
+
+#define IO7_PLL_RNGA(pll)	(((pll) >> 3) & 0x7)
+#define IO7_PLL_RNGB(pll)	(((pll) >> 6) & 0x7)
+
+#define IO7_MEM_SPACE		(2UL * 1024 * 1024 * 1024)	/* 2GB MEM */
+#define IO7_IO_SPACE		(8UL * 1024 * 1024)		/* 8MB I/O */
+
+ 
+/* 
+ * Offset between ram physical addresses and pci64 DAC addresses
+ */
+#define IO7_DAC_OFFSET		(1UL << 49)
+
+/*
+ * This is needed to satisfy the IO() macro used in initializing the machvec
+ */
+#define MARVEL_IACK_SC 							\
+        ((unsigned long)						\
+	 (&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL)))
+
+#ifdef __KERNEL__
+
+/*
+ * IO7 structs
+ */
+#define IO7_NUM_PORTS 4
+#define IO7_AGP_PORT  3
+
+struct io7_port {
+	struct io7 *io7;
+	struct pci_controller *hose;
+
+	int enabled;
+	unsigned int port;
+	io7_ioport_csrs *csrs;
+
+	unsigned long saved_wbase[4];
+	unsigned long saved_wmask[4];
+	unsigned long saved_tbase[4];
+};
+
+struct io7 {
+	struct io7 *next;
+
+	unsigned int pe;
+	io7_port7_csrs *csrs;
+	struct io7_port ports[IO7_NUM_PORTS];
+
+	spinlock_t irq_lock;
+};
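+
+/*
+ * Editorial sketch, not part of the original header: looking up one of
+ * an IO7's ports and checking that it was brought up (the _sketch name
+ * is hypothetical).
+ */
+static inline struct io7_port *
+io7_enabled_port_sketch(struct io7 *io7, unsigned int port)
+{
+	struct io7_port *p = &io7->ports[port & IO7_PORT_MASK];
+	return p->enabled ? p : NULL;
+}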
+
+#ifndef __EXTERN_INLINE
+# define __EXTERN_INLINE extern inline
+# define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions. All access through linear space.
+ */
+
+/*
+ * Memory functions.  All accesses through linear space.
+ */
+
+#define vucp	volatile unsigned char __force *
+#define vusp	volatile unsigned short __force *
+
+extern unsigned int marvel_ioread8(void __iomem *);
+extern void marvel_iowrite8(u8 b, void __iomem *);
+
+__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
+{
+	return __kernel_ldwu(*(vusp)addr);
+}
+
+__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr)
+{
+	__kernel_stw(b, *(vusp)addr);
+}
+
+extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size);
+extern void marvel_iounmap(volatile void __iomem *addr);
+extern void __iomem *marvel_ioportmap (unsigned long addr);
+
+__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr)
+{
+	return (addr >> 40) & 1;
+}
+
+extern int marvel_is_mmio(const volatile void __iomem *);
+
+#undef vucp
+#undef vusp
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		marvel
+#define marvel_trivial_rw_bw	1
+#define marvel_trivial_rw_lq	1
+#define marvel_trivial_io_bw	0
+#define marvel_trivial_io_lq	1
+#define marvel_trivial_iounmap	0
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+# undef __EXTERN_INLINE
+# undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_MARVEL__H__ */
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
new file mode 100644
index 0000000..980a3c5
--- /dev/null
+++ b/include/asm-alpha/core_mcpcia.h
@@ -0,0 +1,379 @@
+#ifndef __ALPHA_MCPCIA__H__
+#define __ALPHA_MCPCIA__H__
+
+/* Define to experiment with fitting everything into one 128MB HAE window.
+   One window per bus, that is.  */
+#define MCPCIA_ONE_HAE_WINDOW 1
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/compiler.h>
+
+/*
+ * MCPCIA is the internal name for a core logic chipset which provides
+ * PCI access for the RAWHIDE family of systems.
+ *
+ * This file is based on:
+ *
+ * RAWHIDE System Programmer's Manual
+ * 16-May-96
+ * Rev. 1.4
+ *
+ */
+
+/*------------------------------------------------------------------------**
+**                                                                        **
+**  I/O procedures                                                        **
+**                                                                        **
+**      inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers             **
+**	inportbxt: 8 bits only                                            **
+**      inport:    alias of inportw                                       **
+**      outport:   alias of outportw                                      **
+**                                                                        **
+**      inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers       **
+**	inmembxt: 8 bits only                                             **
+**      inmem:    alias of inmemw                                         **
+**      outmem:   alias of outmemw                                        **
+**                                                                        **
+**------------------------------------------------------------------------*/
+
+
+/* MCPCIA ADDRESS BIT DEFINITIONS
+ *
+ *  3333 3333 3322 2222 2222 1111 1111 11
+ *  9876 5432 1098 7654 3210 9876 5432 1098 7654 3210
+ *  ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
+ *  1                                             000
+ *  ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
+ *  |                                             |\|
+ *  |                               Byte Enable --+ |
+ *  |                             Transfer Length --+
+ *  +-- IO space, not cached
+ *
+ *   Byte      Transfer
+ *   Enable    Length    Transfer  Byte    Address
+ *   adr<6:5>  adr<4:3>  Length    Enable  Adder
+ *   ---------------------------------------------
+ *      00        00      Byte      1110   0x000
+ *      01        00      Byte      1101   0x020
+ *      10        00      Byte      1011   0x040
+ *      11        00      Byte      0111   0x060
+ *
+ *      00        01      Word      1100   0x008
+ *      01        01      Word      1001   0x028 <= Not supported in this code.
+ *      10        01      Word      0011   0x048
+ *
+ *      00        10      Tribyte   1000   0x010
+ *      01        10      Tribyte   0001   0x030
+ *
+ *      10        11      Longword  0000   0x058
+ *
+ *      Note that byte enables are asserted low.
+ *
+ */
+
+#define MCPCIA_MID(m)		((unsigned long)(m) << 33)
+
+/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. 
+   Durango adds PCI2 and PCI3 at MID 6 and 7 respectively.  */
+#define MCPCIA_HOSE2MID(h)	((h) + 4)
+
+#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
+
+/*
+ * Memory spaces:
+ */
+#define MCPCIA_SPARSE(m)	(IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m))
+#define MCPCIA_DENSE(m)		(IDENT_ADDR + 0xf100000000UL + MCPCIA_MID(m))
+#define MCPCIA_IO(m)		(IDENT_ADDR + 0xf180000000UL + MCPCIA_MID(m))
+#define MCPCIA_CONF(m)		(IDENT_ADDR + 0xf1c0000000UL + MCPCIA_MID(m))
+#define MCPCIA_CSR(m)		(IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m))
+#define MCPCIA_IO_IACK(m)	(IDENT_ADDR + 0xf1f0000000UL + MCPCIA_MID(m))
+#define MCPCIA_DENSE_IO(m)	(IDENT_ADDR + 0xe1fc000000UL + MCPCIA_MID(m))
+#define MCPCIA_DENSE_CONF(m)	(IDENT_ADDR + 0xe1fe000000UL + MCPCIA_MID(m))
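+
+/*
+ * Editorial sketch, not part of the original header: the sparse-space
+ * base for a logical hose, combining MCPCIA_HOSE2MID with the region
+ * macros above (the _sketch name is hypothetical).
+ */
+static inline unsigned long mcpcia_sparse_base_sketch(unsigned int hose)
+{
+	return MCPCIA_SPARSE(MCPCIA_HOSE2MID(hose));
+}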
+
+/*
+ *  General Registers
+ */
+#define MCPCIA_REV(m)		(MCPCIA_CSR(m) + 0x000)
+#define MCPCIA_WHOAMI(m)	(MCPCIA_CSR(m) + 0x040)
+#define MCPCIA_PCI_LAT(m)	(MCPCIA_CSR(m) + 0x080)
+#define MCPCIA_CAP_CTRL(m)	(MCPCIA_CSR(m) + 0x100)
+#define MCPCIA_HAE_MEM(m)	(MCPCIA_CSR(m) + 0x400)
+#define MCPCIA_HAE_IO(m)	(MCPCIA_CSR(m) + 0x440)
+#define _MCPCIA_IACK_SC(m)	(MCPCIA_CSR(m) + 0x480)
+#define MCPCIA_HAE_DENSE(m)	(MCPCIA_CSR(m) + 0x4C0)
+
+/*
+ * Interrupt Control registers
+ */
+#define MCPCIA_INT_CTL(m)	(MCPCIA_CSR(m) + 0x500)
+#define MCPCIA_INT_REQ(m)	(MCPCIA_CSR(m) + 0x540)
+#define MCPCIA_INT_TARG(m)	(MCPCIA_CSR(m) + 0x580)
+#define MCPCIA_INT_ADR(m)	(MCPCIA_CSR(m) + 0x5C0)
+#define MCPCIA_INT_ADR_EXT(m)	(MCPCIA_CSR(m) + 0x600)
+#define MCPCIA_INT_MASK0(m)	(MCPCIA_CSR(m) + 0x640)
+#define MCPCIA_INT_MASK1(m)	(MCPCIA_CSR(m) + 0x680)
+#define MCPCIA_INT_ACK0(m)	(MCPCIA_CSR(m) + 0x10003f00)
+#define MCPCIA_INT_ACK1(m)	(MCPCIA_CSR(m) + 0x10003f40)
+
+/*
+ * Performance Monitor registers
+ */
+#define MCPCIA_PERF_MON(m)	(MCPCIA_CSR(m) + 0x300)
+#define MCPCIA_PERF_CONT(m)	(MCPCIA_CSR(m) + 0x340)
+
+/*
+ * Diagnostic Registers
+ */
+#define MCPCIA_CAP_DIAG(m)	(MCPCIA_CSR(m) + 0x700)
+#define MCPCIA_TOP_OF_MEM(m)	(MCPCIA_CSR(m) + 0x7C0)
+
+/*
+ * Error registers
+ */
+#define MCPCIA_MC_ERR0(m)	(MCPCIA_CSR(m) + 0x800)
+#define MCPCIA_MC_ERR1(m)	(MCPCIA_CSR(m) + 0x840)
+#define MCPCIA_CAP_ERR(m)	(MCPCIA_CSR(m) + 0x880)
+#define MCPCIA_PCI_ERR1(m)	(MCPCIA_CSR(m) + 0x1040)
+#define MCPCIA_MDPA_STAT(m)	(MCPCIA_CSR(m) + 0x4000)
+#define MCPCIA_MDPA_SYN(m)	(MCPCIA_CSR(m) + 0x4040)
+#define MCPCIA_MDPA_DIAG(m)	(MCPCIA_CSR(m) + 0x4080)
+#define MCPCIA_MDPB_STAT(m)	(MCPCIA_CSR(m) + 0x8000)
+#define MCPCIA_MDPB_SYN(m)	(MCPCIA_CSR(m) + 0x8040)
+#define MCPCIA_MDPB_DIAG(m)	(MCPCIA_CSR(m) + 0x8080)
+
+/*
+ * PCI Address Translation Registers.
+ */
+#define MCPCIA_SG_TBIA(m)	(MCPCIA_CSR(m) + 0x1300)
+#define MCPCIA_HBASE(m)		(MCPCIA_CSR(m) + 0x1340)
+
+#define MCPCIA_W0_BASE(m)	(MCPCIA_CSR(m) + 0x1400)
+#define MCPCIA_W0_MASK(m)	(MCPCIA_CSR(m) + 0x1440)
+#define MCPCIA_T0_BASE(m)	(MCPCIA_CSR(m) + 0x1480)
+
+#define MCPCIA_W1_BASE(m)	(MCPCIA_CSR(m) + 0x1500)
+#define MCPCIA_W1_MASK(m)	(MCPCIA_CSR(m) + 0x1540)
+#define MCPCIA_T1_BASE(m)	(MCPCIA_CSR(m) + 0x1580)
+
+#define MCPCIA_W2_BASE(m)	(MCPCIA_CSR(m) + 0x1600)
+#define MCPCIA_W2_MASK(m)	(MCPCIA_CSR(m) + 0x1640)
+#define MCPCIA_T2_BASE(m)	(MCPCIA_CSR(m) + 0x1680)
+
+#define MCPCIA_W3_BASE(m)	(MCPCIA_CSR(m) + 0x1700)
+#define MCPCIA_W3_MASK(m)	(MCPCIA_CSR(m) + 0x1740)
+#define MCPCIA_T3_BASE(m)	(MCPCIA_CSR(m) + 0x1780)
+
+/* Hack!  Only works for bus 0.  */
+
+#ifndef MCPCIA_ONE_HAE_WINDOW
+#define MCPCIA_HAE_ADDRESS	MCPCIA_HAE_MEM(4)
+#endif
+#define MCPCIA_IACK_SC		_MCPCIA_IACK_SC(4)
+
+/* 
+ * The canonical non-remapped I/O and MEM addresses have these values
+ * subtracted out.  This is arranged so that folks manipulating ISA
+ * devices can use their familiar numbers and have them map to bus 0.
+ */
+
+#define MCPCIA_IO_BIAS		MCPCIA_IO(4)
+#define MCPCIA_MEM_BIAS		MCPCIA_DENSE(4)
+
+/* Offset between ram physical addresses and pci64 DAC bus addresses.  */
+#define MCPCIA_DAC_OFFSET	(1UL << 40)
+
+/*
+ * Data structure for handling MCPCIA machine checks:
+ */
+struct el_MCPCIA_uncorrected_frame_mcheck {
+	struct el_common header;
+	struct el_common_EV5_uncorrectable_mcheck procdata;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164)
+ * and EV56 (21164a) processors, can use either a sparse address mapping
+ * scheme, or the so-called byte-word PCI address space, to get at PCI memory
+ * and I/O.
+ *
+ * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
+ */
+
+/*
+ * Memory functions.  64-bit and 32-bit accesses are done through
+ * dense memory space, everything else through sparse space.
+ *
+ * For reading and writing 8 and 16 bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1
+ * which gives 1Gbyte of accessible space; this relates exactly
+ * to the amount of PCI memory mapped *into* system address space.
+ * See p 6-17 of the specification, but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ *          3         2         1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3         2         1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
+
+#define vip	volatile int __force *
+#define vuip	volatile unsigned int __force *
+
+#ifdef MCPCIA_ONE_HAE_WINDOW
+#define MCPCIA_FROB_MMIO						\
+	if (__mcpcia_is_mmio(hose)) {					\
+		set_hae(hose & 0xffffffff);				\
+		hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);	\
+	}
+#else
+#define MCPCIA_FROB_MMIO						\
+	if (__mcpcia_is_mmio(hose)) {					\
+		hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);	\
+	}
+#endif
+
+static inline int __mcpcia_is_mmio(unsigned long addr)
+{
+	return (addr & 0x80000000UL) == 0;
+}
+
+__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
+	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
+	unsigned long result;
+
+	MCPCIA_FROB_MMIO;
+
+	result = *(vip) ((addr << 5) + hose + 0x00);
+	return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
+	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
+	unsigned long w;
+
+	MCPCIA_FROB_MMIO;
+
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + hose + 0x00) = w;
+}
+
+__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
+	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
+	unsigned long result;
+
+	MCPCIA_FROB_MMIO;
+
+	result = *(vip) ((addr << 5) + hose + 0x08);
+	return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
+	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
+	unsigned long w;
+
+	MCPCIA_FROB_MMIO;
+
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + hose + 0x08) = w;
+}
+
+__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr;
+
+	if (!__mcpcia_is_mmio(addr))
+		addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
+
+	return *(vuip)addr;
+}
+
+__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr;
+
+	if (!__mcpcia_is_mmio(addr))
+		addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
+
+	*(vuip)addr = b;
+}
+
+
+__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + MCPCIA_IO_BIAS);
+}
+
+__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr,
+					     unsigned long size)
+{
+	return (void __iomem *)(addr + MCPCIA_MEM_BIAS);
+}
+
+__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr)
+{
+	return addr >= MCPCIA_SPARSE(0);
+}
+
+__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	return __mcpcia_is_mmio(addr);
+}
+
+#undef MCPCIA_FROB_MMIO
+
+#undef vip
+#undef vuip
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		mcpcia
+#define mcpcia_trivial_rw_bw	2
+#define mcpcia_trivial_rw_lq	1
+#define mcpcia_trivial_io_bw	0
+#define mcpcia_trivial_io_lq	0
+#define mcpcia_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_MCPCIA__H__ */
diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h
new file mode 100644
index 0000000..2f966b6
--- /dev/null
+++ b/include/asm-alpha/core_polaris.h
@@ -0,0 +1,110 @@
+#ifndef __ALPHA_POLARIS__H__
+#define __ALPHA_POLARIS__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * POLARIS is the internal name for a core logic chipset which provides
+ * memory controller and PCI access for the 21164PC chip based systems.
+ *
+ * This file is based on:
+ *
+ * Polaris System Controller
+ * Device Functional Specification
+ * 22-Jan-98
+ * Rev. 4.2
+ *
+ */
+
+/* Polaris memory regions */
+#define POLARIS_SPARSE_MEM_BASE		(IDENT_ADDR + 0xf800000000UL)
+#define POLARIS_DENSE_MEM_BASE		(IDENT_ADDR + 0xf900000000UL)
+#define POLARIS_SPARSE_IO_BASE		(IDENT_ADDR + 0xf980000000UL)
+#define POLARIS_SPARSE_CONFIG_BASE	(IDENT_ADDR + 0xf9c0000000UL)
+#define POLARIS_IACK_BASE		(IDENT_ADDR + 0xf9f8000000UL)
+#define POLARIS_DENSE_IO_BASE		(IDENT_ADDR + 0xf9fc000000UL)
+#define POLARIS_DENSE_CONFIG_BASE	(IDENT_ADDR + 0xf9fe000000UL)
+
+#define POLARIS_IACK_SC			POLARIS_IACK_BASE
+
+/* The Polaris command/status registers live in PCI Config space for
+ * bus 0/device 0.  As such, they may be bytes, words, or doublewords.
+ */
+#define POLARIS_W_VENID		(POLARIS_DENSE_CONFIG_BASE)
+#define POLARIS_W_DEVID		(POLARIS_DENSE_CONFIG_BASE+2)
+#define POLARIS_W_CMD		(POLARIS_DENSE_CONFIG_BASE+4)
+#define POLARIS_W_STATUS	(POLARIS_DENSE_CONFIG_BASE+6)
+
+/*
+ * Data structure for handling POLARIS machine checks:
+ */
+struct el_POLARIS_sysdata_mcheck {
+	u_long	psc_status;
+	u_long	psc_pcictl0;
+	u_long	psc_pcictl1;
+	u_long	psc_pcictl2;
+};
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
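+
+/*
+ * By default the accessors below compile as "extern inline"; the
+ * out-of-line copies are emitted by the arch file that defines
+ * __EXTERN_INLINE (empty) before including this header.
+ */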
+
+/*
+ * I/O functions:
+ *
+ * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC)
+ * processors, can use either a sparse address mapping scheme or the
+ * so-called byte-word PCI address space to get at PCI memory and I/O.
+ *
+ * However, we will support only the BWX form.
+ */
+
+/*
+ * Memory functions.  Polaris allows all accesses (byte/word
+ * as well as long/quad) to be done through dense space.
+ *
+ * We will only support DENSE access via BWX insns.
+ */
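+
+/*
+ * Since all four access sizes work in dense space, the accessors
+ * reduce to plain address arithmetic: a readb()/readw() becomes a
+ * single BWX byte/word load at POLARIS_DENSE_MEM_BASE + addr, with no
+ * sparse shifting or byte-lane extraction.  That is why every
+ * polaris_trivial_* flag below is 1.
+ */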
+
+__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE);
+}
+
+__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr,
+					      unsigned long size)
+{
+	return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE);
+}
+
+__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
+{
+	return addr >= POLARIS_SPARSE_MEM_BASE;
+}
+
+__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr < POLARIS_SPARSE_IO_BASE;
+}
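+
+/*
+ * Everything from POLARIS_SPARSE_MEM_BASE upward is chipset I/O space;
+ * within it, addresses below POLARIS_SPARSE_IO_BASE (the sparse and
+ * dense memory regions) count as MMIO.
+ */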
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		polaris
+#define polaris_trivial_rw_bw	1
+#define polaris_trivial_rw_lq	1
+#define polaris_trivial_io_bw	1
+#define polaris_trivial_io_lq	1
+#define polaris_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_POLARIS__H__ */
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
new file mode 100644
index 0000000..5c1c403
--- /dev/null
+++ b/include/asm-alpha/core_t2.h
@@ -0,0 +1,628 @@
+#ifndef __ALPHA_T2__H__
+#define __ALPHA_T2__H__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/compiler.h>
+#include <asm/system.h>
+
+/*
+ * T2 is the internal name for the core logic chipset which provides
+ * memory controller and PCI access for the SABLE-based systems.
+ *
+ * This file is based on:
+ *
+ * SABLE I/O Specification
+ * Revision/Update Information: 1.3
+ *
+ * jestabro@amt.tay1.dec.com Initial Version.
+ *
+ */
+
+#define T2_MEM_R1_MASK 0x07ffffff  /* Mem sparse region 1 mask is 26 bits */
+
+/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
+/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
+#define _GAMMA_BIAS		0x8000000000UL
+
+#if defined(CONFIG_ALPHA_GENERIC)
+#define GAMMA_BIAS		alpha_mv.sys.t2.gamma_bias
+#elif defined(CONFIG_ALPHA_GAMMA)
+#define GAMMA_BIAS		_GAMMA_BIAS
+#else
+#define GAMMA_BIAS		0
+#endif
+
+/*
+ * Memory spaces:
+ */
+#define T2_CONF		        (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL)
+#define T2_IO			(IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL)
+#define T2_SPARSE_MEM		(IDENT_ADDR + GAMMA_BIAS + 0x200000000UL)
+#define T2_DENSE_MEM	        (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL)
+
+#define T2_IOCSR		(IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL)
+#define T2_CERR1		(IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL)
+#define T2_CERR2		(IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL)
+#define T2_CERR3		(IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL)
+#define T2_PERR1		(IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL)
+#define T2_PERR2		(IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL)
+#define T2_PSCR			(IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL)
+#define T2_HAE_1		(IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL)
+#define T2_HAE_2		(IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL)
+#define T2_HBASE		(IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL)
+#define T2_WBASE1		(IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL)
+#define T2_WMASK1		(IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL)
+#define T2_TBASE1		(IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL)
+#define T2_WBASE2		(IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL)
+#define T2_WMASK2		(IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
+#define T2_TBASE2		(IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
+#define T2_TLBBR		(IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
+#define T2_IVR			(IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL)
+#define T2_HAE_3		(IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
+#define T2_HAE_4		(IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
+
+/* The CSRs below are T3/T4 only */
+#define T2_WBASE3		(IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL)
+#define T2_WMASK3		(IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL)
+#define T2_TBASE3		(IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL)
+
+#define T2_TDR0			(IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL)
+#define T2_TDR1			(IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL)
+#define T2_TDR2			(IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL)
+#define T2_TDR3			(IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL)
+#define T2_TDR4			(IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL)
+#define T2_TDR5			(IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL)
+#define T2_TDR6			(IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL)
+#define T2_TDR7			(IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL)
+
+#define T2_WBASE4		(IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL)
+#define T2_WMASK4		(IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL)
+#define T2_TBASE4		(IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL)
+
+#define T2_AIR			(IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL)
+#define T2_VAR			(IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL)
+#define T2_DIR			(IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
+#define T2_ICE			(IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
+
+#define T2_HAE_ADDRESS		T2_HAE_1
+
+/* T2 CSRs are in the non-cacheable primary IO space from 3.8000.0000 to
+ * 3.8fff.ffff
+ *
+ *  +--------------+ 3 8000 0000
+ *  | CPU 0 CSRs   |
+ *  +--------------+ 3 8100 0000
+ *  | CPU 1 CSRs   |
+ *  +--------------+ 3 8200 0000
+ *  | CPU 2 CSRs   |
+ *  +--------------+ 3 8300 0000
+ *  | CPU 3 CSRs   |
+ *  +--------------+ 3 8400 0000
+ *  | CPU Reserved |
+ *  +--------------+ 3 8700 0000
+ *  | Mem Reserved |
+ *  +--------------+ 3 8800 0000
+ *  | Mem 0 CSRs   |
+ *  +--------------+ 3 8900 0000
+ *  | Mem 1 CSRs   |
+ *  +--------------+ 3 8a00 0000
+ *  | Mem 2 CSRs   |
+ *  +--------------+ 3 8b00 0000
+ *  | Mem 3 CSRs   |
+ *  +--------------+ 3 8c00 0000
+ *  | Mem Reserved |
+ *  +--------------+ 3 8e00 0000
+ *  | PCI Bridge   |
+ *  +--------------+ 3 8f00 0000
+ *  | Expansion IO |
+ *  +--------------+ 3 9000 0000
+ *
+ *
+ */
+#define T2_CPU0_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
+#define T2_CPU1_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
+#define T2_CPU2_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
+#define T2_CPU3_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
+
+#define T2_CPUn_BASE(n)		(T2_CPU0_BASE + (((n)&3) * 0x001000000L))
+
+#define T2_MEM0_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
+#define T2_MEM1_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
+#define T2_MEM2_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
+#define T2_MEM3_BASE            (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
+
+
+/*
+ * Sable CPU Module CSRS
+ *
+ * These are CSRs for hardware other than the CPU chip on the CPU module.
+ * The CPU module has Backup Cache control logic, Cbus control logic, and
+ * interrupt control logic on it.  There is a duplicate tag store to speed
+ * up maintaining cache coherency.
+ */
+
+struct sable_cpu_csr {
+  unsigned long bcc;     long fill_00[3]; /* Backup Cache Control */
+  unsigned long bcce;    long fill_01[3]; /* Backup Cache Correctable Error */
+  unsigned long bccea;   long fill_02[3]; /* B-Cache Corr Err Address Latch */
+  unsigned long bcue;    long fill_03[3]; /* B-Cache Uncorrectable Error */
+  unsigned long bcuea;   long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
+  unsigned long dter;    long fill_05[3]; /* Duplicate Tag Error */
+  unsigned long cbctl;   long fill_06[3]; /* CBus Control */
+  unsigned long cbe;     long fill_07[3]; /* CBus Error */
+  unsigned long cbeal;   long fill_08[3]; /* CBus Error Addr Latch low */
+  unsigned long cbeah;   long fill_09[3]; /* CBus Error Addr Latch high */
+  unsigned long pmbx;    long fill_10[3]; /* Processor Mailbox */
+  unsigned long ipir;    long fill_11[3]; /* Inter-Processor Int Request */
+  unsigned long sic;     long fill_12[3]; /* System Interrupt Clear */
+  unsigned long adlk;    long fill_13[3]; /* Address Lock (LDxL/STxC) */
+  unsigned long madrl;   long fill_14[3]; /* CBus Miss Address */
+  unsigned long rev;     long fill_15[3]; /* CMIC Revision */
+};
+
+/*
+ * Data structure for handling T2 machine checks:
+ */
+struct el_t2_frame_header {
+	unsigned int	elcf_fid;	/* Frame ID (from above) */
+	unsigned int	elcf_size;	/* Size of frame in bytes */
+};
+
+struct el_t2_procdata_mcheck {
+	unsigned long	elfmc_paltemp[32];	/* PAL TEMP REGS. */
+	/* EV4-specific fields */
+	unsigned long	elfmc_exc_addr;	/* Addr of excepting insn. */
+	unsigned long	elfmc_exc_sum;	/* Summary of arith traps. */
+	unsigned long	elfmc_exc_mask;	/* Exception mask (from exc_sum). */
+	unsigned long	elfmc_iccsr;	/* IBox hardware enables. */
+	unsigned long	elfmc_pal_base;	/* Base address for PALcode. */
+	unsigned long	elfmc_hier;	/* Hardware Interrupt Enable. */
+	unsigned long	elfmc_hirr;	/* Hardware Interrupt Request. */
+	unsigned long	elfmc_mm_csr;	/* D-stream fault info. */
+	unsigned long	elfmc_dc_stat;	/* D-cache status (ECC/Parity Err). */
+	unsigned long	elfmc_dc_addr;	/* EV3 Phys Addr for ECC/DPERR. */
+	unsigned long	elfmc_abox_ctl;	/* ABox Control Register. */
+	unsigned long	elfmc_biu_stat;	/* BIU Status. */
+	unsigned long	elfmc_biu_addr;	/* BIU Address. */
+	unsigned long	elfmc_biu_ctl;	/* BIU Control. */
+	unsigned long	elfmc_fill_syndrome; /* For correcting ECC errors. */
+	unsigned long	elfmc_fill_addr;/* Cache block which was being read. */
+	unsigned long	elfmc_va;	/* Effective VA of fault or miss. */
+	unsigned long	elfmc_bc_tag;	/* Backup Cache Tag Probe Results. */
+};
+
+/*
+ * Sable processor specific Machine Check Data segment.
+ */
+
+struct el_t2_logout_header {
+	unsigned int	elfl_size;	/* size in bytes of logout area. */
+	unsigned int	elfl_sbz1:31;	/* Should be zero. */
+	unsigned int	elfl_retry:1;	/* Retry flag. */
+	unsigned int	elfl_procoffset; /* Processor-specific offset. */
+	unsigned int	elfl_sysoffset;	 /* Offset of system-specific. */
+	unsigned int	elfl_error_type;	/* PAL error type code. */
+	unsigned int	elfl_frame_rev;		/* PAL Frame revision. */
+};
+struct el_t2_sysdata_mcheck {
+	unsigned long    elcmc_bcc;	      /* CSR 0 */
+	unsigned long    elcmc_bcce;	      /* CSR 1 */
+	unsigned long    elcmc_bccea;      /* CSR 2 */
+	unsigned long    elcmc_bcue;	      /* CSR 3 */
+	unsigned long    elcmc_bcuea;      /* CSR 4 */
+	unsigned long    elcmc_dter;	      /* CSR 5 */
+	unsigned long    elcmc_cbctl;      /* CSR 6 */
+	unsigned long    elcmc_cbe;	      /* CSR 7 */
+	unsigned long    elcmc_cbeal;      /* CSR 8 */
+	unsigned long    elcmc_cbeah;      /* CSR 9 */
+	unsigned long    elcmc_pmbx;	      /* CSR 10 */
+	unsigned long    elcmc_ipir;	      /* CSR 11 */
+	unsigned long    elcmc_sic;	      /* CSR 12 */
+	unsigned long    elcmc_adlk;	      /* CSR 13 */
+	unsigned long    elcmc_madrl;      /* CSR 14 */
+	unsigned long    elcmc_crrev4;     /* CSR 15 */
+};
+
+/*
+ * Sable memory error frame - sable pfms section 3.42
+ */
+struct el_t2_data_memory {
+	struct	el_t2_frame_header elcm_hdr;	/* ID$MEM-FERR = 0x08 */
+	unsigned int  elcm_module;	/* Module id. */
+	unsigned int  elcm_res04;	/* Reserved. */
+	unsigned long elcm_merr;	/* CSR0: Error Reg 1. */
+	unsigned long elcm_mcmd1;	/* CSR1: Command Trap 1. */
+	unsigned long elcm_mcmd2;	/* CSR2: Command Trap 2. */
+	unsigned long elcm_mconf;	/* CSR3: Configuration. */
+	unsigned long elcm_medc1;	/* CSR4: EDC Status 1. */
+	unsigned long elcm_medc2;	/* CSR5: EDC Status 2. */
+	unsigned long elcm_medcc;	/* CSR6: EDC Control. */
+	unsigned long elcm_msctl;	/* CSR7: Stream Buffer Control. */
+	unsigned long elcm_mref;	/* CSR8: Refresh Control. */
+	unsigned long elcm_filter;	/* CSR9: CRD Filter Control. */
+};
+
+
+/*
+ * Sable other CPU error frame - sable pfms section 3.43
+ */
+struct el_t2_data_other_cpu {
+	short	      elco_cpuid;	/* CPU ID */
+	short	      elco_res02[3];
+	unsigned long elco_bcc;	/* CSR 0 */
+	unsigned long elco_bcce;	/* CSR 1 */
+	unsigned long elco_bccea;	/* CSR 2 */
+	unsigned long elco_bcue;	/* CSR 3 */
+	unsigned long elco_bcuea;	/* CSR 4 */
+	unsigned long elco_dter;	/* CSR 5 */
+	unsigned long elco_cbctl;	/* CSR 6 */
+	unsigned long elco_cbe;	/* CSR 7 */
+	unsigned long elco_cbeal;	/* CSR 8 */
+	unsigned long elco_cbeah;	/* CSR 9 */
+	unsigned long elco_pmbx;	/* CSR 10 */
+	unsigned long elco_ipir;	/* CSR 11 */
+	unsigned long elco_sic;	/* CSR 12 */
+	unsigned long elco_adlk;	/* CSR 13 */
+	unsigned long elco_madrl;	/* CSR 14 */
+	unsigned long elco_crrev4;	/* CSR 15 */
+};
+
+/*
+ * Sable T2 error frame - sable pfms section 3.44
+ */
+struct el_t2_data_t2 {
+	struct el_t2_frame_header elct_hdr;	/* ID$T2-FRAME */
+	unsigned long elct_iocsr;	/* IO Control and Status Register */
+	unsigned long elct_cerr1;	/* Cbus Error Register 1 */
+	unsigned long elct_cerr2;	/* Cbus Error Register 2 */
+	unsigned long elct_cerr3;	/* Cbus Error Register 3 */
+	unsigned long elct_perr1;	/* PCI Error Register 1 */
+	unsigned long elct_perr2;	/* PCI Error Register 2 */
+	unsigned long elct_hae0_1;	/* High Address Extension Register 1 */
+	unsigned long elct_hae0_2;	/* High Address Extension Register 2 */
+	unsigned long elct_hbase;	/* High Base Register */
+	unsigned long elct_wbase1;	/* Window Base Register 1 */
+	unsigned long elct_wmask1;	/* Window Mask Register 1 */
+	unsigned long elct_tbase1;	/* Translated Base Register 1 */
+	unsigned long elct_wbase2;	/* Window Base Register 2 */
+	unsigned long elct_wmask2;	/* Window Mask Register 2 */
+	unsigned long elct_tbase2;	/* Translated Base Register 2 */
+	unsigned long elct_tdr0;	/* TLB Data Register 0 */
+	unsigned long elct_tdr1;	/* TLB Data Register 1 */
+	unsigned long elct_tdr2;	/* TLB Data Register 2 */
+	unsigned long elct_tdr3;	/* TLB Data Register 3 */
+	unsigned long elct_tdr4;	/* TLB Data Register 4 */
+	unsigned long elct_tdr5;	/* TLB Data Register 5 */
+	unsigned long elct_tdr6;	/* TLB Data Register 6 */
+	unsigned long elct_tdr7;	/* TLB Data Register 7 */
+};
+
+/*
+ * Sable error log data structure - sable pfms section 3.40
+ */
+struct el_t2_data_corrected {
+	unsigned long elcpb_biu_stat;
+	unsigned long elcpb_biu_addr;
+	unsigned long elcpb_biu_ctl;
+	unsigned long elcpb_fill_syndrome;
+	unsigned long elcpb_fill_addr;
+	unsigned long elcpb_bc_tag;
+};
+
+/*
+ * Sable error log data structure
+ * Note there are 4 memory slots on sable (see t2.h)
+ */
+struct el_t2_frame_mcheck {
+	struct el_t2_frame_header elfmc_header;	/* ID$P-FRAME_MCHECK */
+	struct el_t2_logout_header elfmc_hdr;
+	struct el_t2_procdata_mcheck elfmc_procdata;
+	struct el_t2_sysdata_mcheck elfmc_sysdata;
+	struct el_t2_data_t2 elfmc_t2data;
+	struct el_t2_data_memory elfmc_memdata[4];
+	struct el_t2_frame_header elfmc_footer;	/* empty */
+};
+
+
+/*
+ * Sable error log data structures on memory errors
+ */
+struct el_t2_frame_corrected {
+	struct el_t2_frame_header elfcc_header;	/* ID$P-BC-COR */
+	struct el_t2_logout_header elfcc_hdr;
+	struct el_t2_data_corrected elfcc_procdata;
+/*	struct el_t2_data_t2 elfcc_t2data;		*/
+/*	struct el_t2_data_memory elfcc_memdata[4];	*/
+	struct el_t2_frame_header elfcc_footer;	/* empty */
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * T2 (the core logic PCI/memory support chipset for the SABLE
+ * series of processors) uses a sparse address mapping scheme to
+ * get at PCI memory and I/O.
+ */
+
+#define vip	volatile int *
+#define vuip	volatile unsigned int *
+
+static inline u8 t2_inb(unsigned long addr)
+{
+	long result = *(vip) ((addr << 5) + T2_IO + 0x00);
+	return __kernel_extbl(result, addr & 3);
+}
+
+static inline void t2_outb(u8 b, unsigned long addr)
+{
+	unsigned long w;
+
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + T2_IO + 0x00) = w;
+	mb();
+}
+
+static inline u16 t2_inw(unsigned long addr)
+{
+	long result = *(vip) ((addr << 5) + T2_IO + 0x08);
+	return __kernel_extwl(result, addr & 3);
+}
+
+static inline void t2_outw(u16 b, unsigned long addr)
+{
+	unsigned long w;
+
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + T2_IO + 0x08) = w;
+	mb();
+}
+
+static inline u32 t2_inl(unsigned long addr)
+{
+	return *(vuip) ((addr << 5) + T2_IO + 0x18);
+}
+
+static inline void t2_outl(u32 b, unsigned long addr)
+{
+	*(vuip) ((addr << 5) + T2_IO + 0x18) = b;
+	mb();
+}
+
+
+/*
+ * Memory functions.
+ *
+ * For reading and writing 8 and 16 bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1
+ * which gives 1Gbyte of accessible space which relates exactly
+ * to the amount of PCI memory mapping *into* system address space.
+ * See p 6-17 of the specification but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ *          3         2         1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3         2         1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
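+
+/*
+ * As a concrete illustration (arbitrary address), a byte read at
+ * sparse offset 0x1234567 does:
+ *
+ *	msb  = 0x1234567 >> 27 = 0		(programmed into HAE_MEM)
+ *	addr = 0x1234567 & T2_MEM_R1_MASK
+ *	val  = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00)
+ *
+ * with the low two bits of the original address selecting the byte
+ * lane via __kernel_extbl().  The +0x00/+0x08/+0x18 offsets encode the
+ * transfer length (the LL field above) for byte/word/longword.
+ */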
+
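+/*
+ * Helper for the sparse-space accessors below.  It is deliberately not
+ * function-like: it updates the caller's local variables 'msb' and
+ * 'addr' in place, and every caller holds t2_hae_lock around it since
+ * set_hae() updates a shared CSR.
+ */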
+#define t2_set_hae do { \
+	msb = addr >> 27; \
+	addr &= T2_MEM_R1_MASK; \
+	set_hae(msb); \
+} while (0)
+
+static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
+
+__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, msb;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, msb;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	return __kernel_extwl(result, addr & 3);
+}
+
+/*
+ * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
+ * because we cannot access all of DENSE without changing its HAE.
+ */
+__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long result, msb;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	return result & 0xffffffffUL;
+}
+
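+/*
+ * There is no 64-bit sparse transfer length, so the quadword routines
+ * below (t2_readq/t2_writeq) are composed of two longword accesses
+ * 0x80 bytes apart (4 << 5), both made under a single t2_hae_lock
+ * hold.
+ */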
+__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long r0, r1, work, msb;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
+	r0 = *(vuip)(work);
+	r1 = *(vuip)(work + (4 << 5));
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	return r1 << 32 | r0;
+}
+
+__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long msb, w;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	w = __kernel_insbl(b, addr & 3);
+	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+}
+
+__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long msb, w;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	w = __kernel_inswl(b, addr & 3);
+	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+}
+
+/*
+ * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
+ * because we cannot access all of DENSE without changing its HAE.
+ */
+__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long msb;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+}
+
+__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long msb, work;
+	unsigned long flags;
+	spin_lock_irqsave(&t2_hae_lock, flags);
+
+	t2_set_hae;
+
+	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
+	*(vuip)work = b;
+	*(vuip)(work + (4 << 5)) = b >> 32;
+	spin_unlock_irqrestore(&t2_hae_lock, flags);
+}
+
+__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + T2_IO);
+}
+
+__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr, 
+					 unsigned long size)
+{
+	return (void __iomem *)(addr + T2_DENSE_MEM);
+}
+
+__EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
+{
+	return (long)addr >= 0;
+}
+
+__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr >= T2_DENSE_MEM;
+}
+
+/* New-style ioread interface.  The mmio routines are so ugly for T2 that
+   it doesn't make sense to merge the pio and mmio routines.  */
+
+#define IOPORT(OS, NS)							\
+__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr)		\
+{									\
+	if (t2_is_mmio(xaddr))						\
+		return t2_read##OS(xaddr - T2_DENSE_MEM);		\
+	else								\
+		return t2_in##OS((unsigned long)xaddr - T2_IO);		\
+}									\
+__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr)	\
+{									\
+	if (t2_is_mmio(xaddr))						\
+		t2_write##OS(b, xaddr - T2_DENSE_MEM);			\
+	else								\
+		t2_out##OS(b, (unsigned long)xaddr - T2_IO);		\
+}
+
+IOPORT(b, 8)
+IOPORT(w, 16)
+IOPORT(l, 32)
+
+#undef IOPORT
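+
+/*
+ * The IOPORT() instantiations above expand to t2_ioread8/16/32 and
+ * t2_iowrite8/16/32, each dispatching on t2_is_mmio() to either the
+ * sparse-memory or the port-I/O flavor.
+ */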
+
+#undef vip
+#undef vuip
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		t2
+#define t2_trivial_rw_bw	0
+#define t2_trivial_rw_lq	0
+#define t2_trivial_io_bw	0
+#define t2_trivial_io_lq	0
+#define t2_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_T2__H__ */
diff --git a/include/asm-alpha/core_titan.h b/include/asm-alpha/core_titan.h
new file mode 100644
index 0000000..a64ccbf
--- /dev/null
+++ b/include/asm-alpha/core_titan.h
@@ -0,0 +1,415 @@
+#ifndef __ALPHA_TITAN__H__
+#define __ALPHA_TITAN__H__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/compiler.h>
+
+/*
+ * TITAN is the internal name for a core logic chipset which provides
+ * memory controller and PCI/AGP access for 21264-based systems.
+ *
+ * This file is based on:
+ *
+ * Titan Chipset Engineering Specification
+ * Revision 0.12
+ * 13 July 1999
+ *
+ */
+
+/* XXX: Do we need to conditionalize on this?  */
+#ifdef USE_48_BIT_KSEG
+#define TI_BIAS 0x80000000000UL
+#else
+#define TI_BIAS 0x10000000000UL
+#endif
+
+/*
+ * CChip, DChip, and PChip registers
+ */
+
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(64)));
+} titan_64;
+
+typedef struct {
+	titan_64	csc;
+	titan_64	mtr;
+	titan_64	misc;
+	titan_64	mpd;
+	titan_64	aar0;
+	titan_64	aar1;
+	titan_64	aar2;
+	titan_64	aar3;
+	titan_64	dim0;
+	titan_64	dim1;
+	titan_64	dir0;
+	titan_64	dir1;
+	titan_64	drir;
+	titan_64	prben;
+	titan_64	iic0;
+	titan_64	iic1;
+	titan_64	mpr0;
+	titan_64	mpr1;
+	titan_64	mpr2;
+	titan_64	mpr3;
+	titan_64	rsvd[2];
+	titan_64	ttr;
+	titan_64	tdr;
+	titan_64	dim2;
+	titan_64	dim3;
+	titan_64	dir2;
+	titan_64	dir3;
+	titan_64	iic2;
+	titan_64	iic3;
+	titan_64	pwr;
+	titan_64	reserved[17];
+	titan_64	cmonctla;
+	titan_64	cmonctlb;
+	titan_64	cmoncnt01;
+	titan_64	cmoncnt23;
+	titan_64	cpen;
+} titan_cchip;
+
+typedef struct {
+	titan_64	dsc;
+	titan_64	str;
+	titan_64	drev;
+	titan_64	dsc2;
+} titan_dchip;
+
+typedef struct {
+	titan_64	wsba[4];
+	titan_64	wsm[4];
+	titan_64	tba[4];
+	titan_64	pctl;
+	titan_64	plat;
+	titan_64	reserved0[2];
+	union {
+		struct {
+			titan_64	serror;
+			titan_64	serren;
+			titan_64	serrset;
+			titan_64	reserved0;
+			titan_64	gperror;
+			titan_64	gperren;
+			titan_64	gperrset;
+			titan_64	reserved1;
+			titan_64	gtlbiv;
+			titan_64	gtlbia;
+			titan_64	reserved2[2];
+			titan_64	sctl;
+			titan_64	reserved3[3];
+		} g;
+		struct {
+			titan_64	agperror;
+			titan_64	agperren;
+			titan_64	agperrset;
+			titan_64	agplastwr;
+			titan_64	aperror;
+			titan_64	aperren;
+			titan_64	aperrset;
+			titan_64	reserved0;
+			titan_64	atlbiv;
+			titan_64	atlbia;
+			titan_64	reserved1[6];
+		} a;
+	} port_specific;
+	titan_64	sprst;
+	titan_64	reserved1[31];
+} titan_pachip_port;
+
+typedef struct {
+	titan_pachip_port	g_port;
+	titan_pachip_port	a_port;
+} titan_pachip;
+
+#define TITAN_cchip	((titan_cchip  *)(IDENT_ADDR+TI_BIAS+0x1A0000000UL))
+#define TITAN_dchip    	((titan_dchip  *)(IDENT_ADDR+TI_BIAS+0x1B0000800UL))
+#define TITAN_pachip0 	((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x180000000UL))
+#define TITAN_pachip1 	((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x380000000UL))
+extern unsigned TITAN_agp;
+extern int TITAN_bootcpu;
+
+/*
+ * TITAN PA-chip Window Space Base Address register.
+ * (WSBA[0-2])
+ */
+#define wsba_m_ena 0x1                
+#define wsba_m_sg 0x2
+#define wsba_m_addr 0xFFF00000  
+#define wmask_k_sz1gb 0x3FF00000                   
+union TPAchipWSBA {
+	struct  {
+		unsigned wsba_v_ena : 1;
+		unsigned wsba_v_sg : 1;
+		unsigned wsba_v_rsvd1 : 18;
+		unsigned wsba_v_addr : 12;
+		unsigned wsba_v_rsvd2 : 32;
+        } wsba_r_bits;
+	int wsba_q_whole [2];
+};
+
+/*
+ * TITAN PA-chip Control Register
+ * This definition covers both the G-Port GPCTL and the A-PORT APCTL.
+ * Bits <51:0> are the same in both cases. APCTL<63:52> are only 
+ * applicable to AGP.
+ */
+#define pctl_m_fbtb 			0x00000001
+#define pctl_m_thdis 			0x00000002
+#define pctl_m_chaindis 		0x00000004
+#define pctl_m_tgtlat 			0x00000018
+#define pctl_m_hole  	  		0x00000020
+#define pctl_m_mwin 	  		0x00000040
+#define pctl_m_arbena 	  		0x00000080
+#define pctl_m_prigrp 	  		0x0000FF00
+#define pctl_m_ppri 	  		0x00010000
+#define pctl_m_pcispd66  		0x00020000
+#define pctl_m_cngstlt	  		0x003C0000
+#define pctl_m_ptpdesten 		0x3FC00000
+#define pctl_m_dpcen			0x40000000
+#define pctl_m_apcen		0x0000000080000000UL
+#define pctl_m_dcrtv		0x0000000300000000UL
+#define pctl_m_en_stepping	0x0000000400000000UL
+#define apctl_m_rsvd1		0x000FFFF800000000UL
+#define apctl_m_agp_rate	0x0030000000000000UL
+#define apctl_m_agp_sba_en	0x0040000000000000UL
+#define apctl_m_agp_en		0x0080000000000000UL
+#define apctl_m_rsvd2		0x0100000000000000UL
+#define apctl_m_agp_present	0x0200000000000000UL
+#define apctl_agp_hp_rd		0x1C00000000000000UL
+#define apctl_agp_lp_rd		0xE000000000000000UL
+#define gpctl_m_rsvd		0xFFFFFFF800000000UL
+union TPAchipPCTL {
+	struct {
+		unsigned pctl_v_fbtb : 1;		/* A/G [0]     */
+		unsigned pctl_v_thdis : 1;		/* A/G [1]     */
+		unsigned pctl_v_chaindis : 1;		/* A/G [2]     */
+		unsigned pctl_v_tgtlat : 2;		/* A/G [4:3]   */
+		unsigned pctl_v_hole : 1;		/* A/G [5]     */
+		unsigned pctl_v_mwin : 1;		/* A/G [6]     */
+		unsigned pctl_v_arbena : 1;		/* A/G [7]     */
+		unsigned pctl_v_prigrp : 8;		/* A/G [15:8]  */
+		unsigned pctl_v_ppri : 1;		/* A/G [16]    */
+		unsigned pctl_v_pcispd66 : 1;		/* A/G [17]    */
+		unsigned pctl_v_cngstlt : 4;		/* A/G [21:18] */
+		unsigned pctl_v_ptpdesten : 8;		/* A/G [29:22] */
+		unsigned pctl_v_dpcen : 1;		/* A/G [30]    */
+		unsigned pctl_v_apcen : 1;		/* A/G [31]    */
+		unsigned pctl_v_dcrtv : 2;		/* A/G [33:32] */
+		unsigned pctl_v_en_stepping :1;		/* A/G [34]    */
+		unsigned apctl_v_rsvd1 : 17;		/* A   [51:35] */
+		unsigned apctl_v_agp_rate : 2;		/* A   [53:52] */
+		unsigned apctl_v_agp_sba_en : 1;	/* A   [54]    */
+		unsigned apctl_v_agp_en : 1;		/* A   [55]    */
+		unsigned apctl_v_rsvd2 : 1;		/* A   [56]    */
+		unsigned apctl_v_agp_present : 1;	/* A   [57]    */
+		unsigned apctl_v_agp_hp_rd : 3;		/* A   [60:58] */
+		unsigned apctl_v_agp_lp_rd : 3;		/* A   [63:61] */
+	} pctl_r_bits;
+	unsigned int pctl_l_whole [2];
+	unsigned long pctl_q_whole;
+};
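+
+/*
+ * A sketch of decoding a port's control register through the union
+ * (field names as laid out above):
+ *
+ *	union TPAchipPCTL pctl;
+ *
+ *	pctl.pctl_q_whole = TITAN_pachip0->g_port.pctl.csr;
+ *	if (pctl.pctl_r_bits.pctl_v_pcispd66)
+ *		... the bus is running at 66 MHz ...
+ */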
+
+/*
+ * SERROR / SERREN / SERRSET
+ */
+union TPAchipSERR {
+	struct {
+		unsigned serr_v_lost_uecc : 1;		/* [0]		*/
+		unsigned serr_v_uecc : 1;		/* [1]  	*/
+		unsigned serr_v_cre : 1;		/* [2]		*/
+		unsigned serr_v_nxio : 1;		/* [3]		*/
+		unsigned serr_v_lost_cre : 1;		/* [4]		*/
+		unsigned serr_v_rsvd0 : 10;		/* [14:5]	*/
+		unsigned serr_v_addr : 32;		/* [46:15]	*/
+		unsigned serr_v_rsvd1 : 5;		/* [51:47]	*/
+		unsigned serr_v_source : 2;		/* [53:52]	*/
+		unsigned serr_v_cmd : 2;		/* [55:54]	*/
+		unsigned serr_v_syn : 8;		/* [63:56]	*/
+	} serr_r_bits;
+	unsigned int serr_l_whole[2];
+	unsigned long serr_q_whole;
+};
+
+/*
+ * GPERROR / APERROR / GPERREN / APERREN / GPERRSET / APERRSET
+ */
+union TPAchipPERR {
+	struct {
+		unsigned long perr_v_lost : 1;	     	/* [0]		*/
+		unsigned long perr_v_serr : 1;		/* [1]		*/
+		unsigned long perr_v_perr : 1;		/* [2]		*/
+		unsigned long perr_v_dcrto : 1;		/* [3]		*/
+		unsigned long perr_v_sge : 1;		/* [4]		*/
+		unsigned long perr_v_ape : 1;		/* [5]		*/
+		unsigned long perr_v_ta : 1;		/* [6]		*/
+		unsigned long perr_v_dpe : 1;		/* [7]		*/
+		unsigned long perr_v_nds : 1;		/* [8]		*/
+		unsigned long perr_v_iptpr : 1;		/* [9]		*/
+		unsigned long perr_v_iptpw : 1;		/* [10] 	*/
+		unsigned long perr_v_rsvd0 : 3;		/* [13:11]	*/
+		unsigned long perr_v_addr : 33;		/* [46:14]	*/
+		unsigned long perr_v_dac : 1;		/* [47]		*/
+		unsigned long perr_v_mwin : 1;		/* [48]		*/
+		unsigned long perr_v_rsvd1 : 3;		/* [51:49]	*/
+		unsigned long perr_v_cmd : 4;		/* [55:52]	*/
+		unsigned long perr_v_rsvd2 : 8;		/* [63:56]	*/
+	} perr_r_bits;
+	unsigned int perr_l_whole[2];
+	unsigned long perr_q_whole;
+};
+
+/*
+ * AGPERROR / AGPERREN / AGPERRSET
+ */
+union TPAchipAGPERR {
+	struct {
+		unsigned agperr_v_lost : 1;		/* [0]		*/
+		unsigned agperr_v_lpqfull : 1;		/* [1]		*/
+		unsigned agperr_v_hpqfull : 1;		/* [2]		*/
+		unsigned agperr_v_rescmd : 1;		/* [3]		*/
+		unsigned agperr_v_ipte : 1;		/* [4]		*/
+		unsigned agperr_v_ptp :	1;      	/* [5]		*/
+		unsigned agperr_v_nowindow : 1;		/* [6]		*/
+		unsigned agperr_v_rsvd0 : 8;		/* [14:7]	*/
+		unsigned agperr_v_addr : 32;		/* [46:15]	*/
+		unsigned agperr_v_rsvd1 : 1;		/* [47]		*/
+		unsigned agperr_v_dac : 1;		/* [48]		*/
+		unsigned agperr_v_mwin : 1;		/* [49]		*/
+		unsigned agperr_v_cmd : 3;		/* [52:50]	*/
+		unsigned agperr_v_length : 6;		/* [58:53]	*/
+		unsigned agperr_v_fence : 1;		/* [59]		*/
+		unsigned agperr_v_rsvd2 : 4;		/* [63:60]	*/
+	} agperr_r_bits;
+	unsigned int agperr_l_whole[2];
+	unsigned long agperr_q_whole;
+};
+/*
+ * Memory spaces:
+ * Hose numbers are assigned as follows:
+ *		0 - pachip 0 / G Port
+ *		1 - pachip 1 / G Port
+ *		2 - pachip 0 / A Port
+ *		3 - pachip 1 / A Port
+ */
+#define TITAN_HOSE_SHIFT       (33) 
+#define TITAN_HOSE(h)		(((unsigned long)(h)) << TITAN_HOSE_SHIFT)
+#define TITAN_BASE		(IDENT_ADDR + TI_BIAS)
+#define TITAN_MEM(h)	     	(TITAN_BASE+TITAN_HOSE(h)+0x000000000UL)
+#define _TITAN_IACK_SC(h)    	(TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL)
+#define TITAN_IO(h)	     	(TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL)
+#define TITAN_CONF(h)	     	(TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL)
+
+#define TITAN_HOSE_MASK		TITAN_HOSE(3)
+#define TITAN_IACK_SC	     	_TITAN_IACK_SC(0) /* hack! */
+
+/* 
+ * The canonical non-remapped I/O and MEM addresses have these values
+ * subtracted out.  This is arranged so that folks manipulating ISA
+ * devices can use their familiar numbers and have them map to bus 0.
+ */
+
+#define TITAN_IO_BIAS		TITAN_IO(0)
+#define TITAN_MEM_BIAS		TITAN_MEM(0)
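+
+/*
+ * For example, hose 2 (pachip 0 / A Port) has its I/O space at
+ * TITAN_IO(2) = TITAN_BASE + (2UL << 33) + 0x1FC000000UL, and an ISA
+ * inb(0x3f8) lands at TITAN_IO(0) + 0x3f8 via titan_ioportmap() below.
+ */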
+
+/* The IO address space is larger than 0xffff */
+#define TITAN_IO_SPACE		(TITAN_CONF(0) - TITAN_IO(0))
+
+/* TIG Space */
+#define TITAN_TIG_SPACE		(TITAN_BASE + 0x100000000UL)
+
+/* Offset between ram physical addresses and pci64 DAC bus addresses.  */
+/* ??? Just a guess.  Ought to confirm it hasn't been moved.  */
+#define TITAN_DAC_OFFSET	(1UL << 40)
+
+/*
+ * Data structure for handling TITAN machine checks:
+ */
+#define SCB_Q_SYSERR	0x620
+#define SCB_Q_PROCERR	0x630
+#define SCB_Q_SYSMCHK	0x660
+#define SCB_Q_PROCMCHK	0x670
+#define SCB_Q_SYSEVENT	0x680	/* environmental / system management */
+struct el_TITAN_sysdata_mcheck {
+	u64 summary;	/* 0x00 */
+	u64 c_dirx;	/* 0x08 */
+	u64 c_misc;	/* 0x10 */
+	u64 p0_serror;	/* 0x18 */
+	u64 p0_gperror; /* 0x20 */
+	u64 p0_aperror; /* 0x28 */
+	u64 p0_agperror;/* 0x30 */
+	u64 p1_serror;	/* 0x38 */
+	u64 p1_gperror; /* 0x40 */
+	u64 p1_aperror; /* 0x48 */
+	u64 p1_agperror;/* 0x50 */
+};
+
+/*
+ * System area for a privateer 680 environmental/system management mcheck 
+ */
+struct el_PRIVATEER_envdata_mcheck {
+	u64 summary;	/* 0x00 */
+	u64 c_dirx;	/* 0x08 */
+	u64 smir;	/* 0x10 */
+	u64 cpuir;	/* 0x18 */
+	u64 psir;	/* 0x20 */
+	u64 fault;	/* 0x28 */
+	u64 sys_doors;	/* 0x30 */
+	u64 temp_warn;	/* 0x38 */
+	u64 fan_ctrl;	/* 0x40 */
+	u64 code;	/* 0x48 */
+	u64 reserved;	/* 0x50 */
+};
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * TITAN, a 21??? PCI/memory support chipset for the EV6 (21264),
+ * can only use linear accesses to get at PCI/AGP memory and I/O spaces.
+ */
+
+/*
+ * Memory functions.  All accesses are done through linear space.
+ */
+
+__EXTERN_INLINE void __iomem *titan_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + TITAN_IO_BIAS);
+}
+
+extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size);
+extern void titan_iounmap(volatile void __iomem *addr);
+
+__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr)
+{
+	return addr >= TITAN_BASE;
+}
+
+extern int titan_is_mmio(const volatile void __iomem *addr);
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		titan
+#define titan_trivial_rw_bw	1
+#define titan_trivial_rw_lq	1
+#define titan_trivial_io_bw	1
+#define titan_trivial_io_lq	1
+#define titan_trivial_iounmap	0
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_TITAN__H__ */
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
new file mode 100644
index 0000000..44e635d
--- /dev/null
+++ b/include/asm-alpha/core_tsunami.h
@@ -0,0 +1,344 @@
+#ifndef __ALPHA_TSUNAMI__H__
+#define __ALPHA_TSUNAMI__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * TSUNAMI/TYPHOON are the internal names for the core logic chipset which
+ * provides memory controller and PCI access for 21264-based systems.
+ *
+ * This file is based on:
+ *
+ * Tsunami System Programmers Manual
+ * Preliminary, Chapters 2-5
+ *
+ */
+
+/* XXX: Do we need to conditionalize on this?  */
+#ifdef USE_48_BIT_KSEG
+#define TS_BIAS 0x80000000000UL
+#else
+#define TS_BIAS 0x10000000000UL
+#endif
+
+/*
+ * CChip, DChip, and PChip registers
+ */
+
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(64)));
+} tsunami_64;
+
+typedef struct {
+	tsunami_64	csc;
+	tsunami_64	mtr;
+	tsunami_64	misc;
+	tsunami_64	mpd;
+	tsunami_64	aar0;
+	tsunami_64	aar1;
+	tsunami_64	aar2;
+	tsunami_64	aar3;
+	tsunami_64	dim0;
+	tsunami_64	dim1;
+	tsunami_64	dir0;
+	tsunami_64	dir1;
+	tsunami_64	drir;
+	tsunami_64	prben;
+	tsunami_64	iic;	/* a.k.a. iic0 */
+	tsunami_64	wdr;	/* a.k.a. iic1 */
+	tsunami_64	mpr0;
+	tsunami_64	mpr1;
+	tsunami_64	mpr2;
+	tsunami_64	mpr3;
+	tsunami_64	mctl;
+	tsunami_64	__pad1;
+	tsunami_64	ttr;
+	tsunami_64	tdr;
+	tsunami_64	dim2;
+	tsunami_64	dim3;
+	tsunami_64	dir2;
+	tsunami_64	dir3;
+	tsunami_64	iic2;
+	tsunami_64	iic3;
+} tsunami_cchip;
+
+typedef struct {
+	tsunami_64	dsc;
+	tsunami_64	str;
+	tsunami_64	drev;
+} tsunami_dchip;
+
+typedef struct {
+	tsunami_64	wsba[4];
+	tsunami_64	wsm[4];
+	tsunami_64	tba[4];
+	tsunami_64	pctl;
+	tsunami_64	plat;
+	tsunami_64	reserved;
+	tsunami_64	perror;
+	tsunami_64	perrmask;
+	tsunami_64	perrset;
+	tsunami_64	tlbiv;
+	tsunami_64	tlbia;
+	tsunami_64	pmonctl;
+	tsunami_64	pmoncnt;
+} tsunami_pchip;
+
+#define TSUNAMI_cchip  ((tsunami_cchip *)(IDENT_ADDR+TS_BIAS+0x1A0000000UL))
+#define TSUNAMI_dchip  ((tsunami_dchip *)(IDENT_ADDR+TS_BIAS+0x1B0000800UL))
+#define TSUNAMI_pchip0 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x180000000UL))
+#define TSUNAMI_pchip1 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x380000000UL))
+extern int TSUNAMI_bootcpu;
+
+/*
+ * TSUNAMI Pchip Error register.
+ */
+
+#define perror_m_lost 0x1
+#define perror_m_serr 0x2
+#define perror_m_perr 0x4
+#define perror_m_dcrto 0x8
+#define perror_m_sge 0x10
+#define perror_m_ape 0x20
+#define perror_m_ta 0x40
+#define perror_m_rdpe 0x80
+#define perror_m_nds 0x100
+#define perror_m_rto 0x200
+#define perror_m_uecc 0x400
+#define perror_m_cre 0x800
+#define perror_m_addrl 0xFFFFFFFF0000UL
+#define perror_m_addrh 0x7000000000000UL
+#define perror_m_cmd 0xF0000000000000UL
+#define perror_m_syn 0xFF00000000000000UL
+union TPchipPERROR {   
+	struct  {
+		unsigned int perror_v_lost : 1;
+		unsigned perror_v_serr : 1;
+		unsigned perror_v_perr : 1;
+		unsigned perror_v_dcrto : 1;
+		unsigned perror_v_sge : 1;
+		unsigned perror_v_ape : 1;
+		unsigned perror_v_ta : 1;
+		unsigned perror_v_rdpe : 1;
+		unsigned perror_v_nds : 1;
+		unsigned perror_v_rto : 1;
+		unsigned perror_v_uecc : 1;
+		unsigned perror_v_cre : 1;                 
+		unsigned perror_v_rsvd1 : 4;
+		unsigned perror_v_addrl : 32;
+		unsigned perror_v_addrh : 3;
+		unsigned perror_v_rsvd2 : 1;
+		unsigned perror_v_cmd : 4;
+		unsigned perror_v_syn : 8;
+	} perror_r_bits;
+	int perror_q_whole [2];
+};                       
+
+/*
+ * TSUNAMI Pchip Window Space Base Address register.
+ */
+#define wsba_m_ena 0x1                
+#define wsba_m_sg 0x2
+#define wsba_m_ptp 0x4
+#define wsba_m_addr 0xFFF00000  
+#define wmask_k_sz1gb 0x3FF00000                   
+union TPchipWSBA {
+	struct  {
+		unsigned wsba_v_ena : 1;
+		unsigned wsba_v_sg : 1;
+		unsigned wsba_v_ptp : 1;
+		unsigned wsba_v_rsvd1 : 17;
+		unsigned wsba_v_addr : 12;
+		unsigned wsba_v_rsvd2 : 32;
+	} wsba_r_bits;
+	int wsba_q_whole [2];
+};
+
+/*
+ * TSUNAMI Pchip Control Register
+ */
+#define pctl_m_fdsc 0x1
+#define pctl_m_fbtb 0x2
+#define pctl_m_thdis 0x4
+#define pctl_m_chaindis 0x8
+#define pctl_m_tgtlat 0x10
+#define pctl_m_hole 0x20
+#define pctl_m_mwin 0x40
+#define pctl_m_arbena 0x80
+#define pctl_m_prigrp 0x7F00
+#define pctl_m_ppri 0x8000
+#define pctl_m_rsvd1 0x30000
+#define pctl_m_eccen 0x40000
+#define pctl_m_padm 0x80000
+#define pctl_m_cdqmax 0xF00000
+#define pctl_m_rev 0xFF000000
+#define pctl_m_crqmax 0xF00000000UL
+#define pctl_m_ptpmax 0xF000000000UL
+#define pctl_m_pclkx 0x30000000000UL
+#define pctl_m_fdsdis 0x40000000000UL
+#define pctl_m_fdwdis 0x80000000000UL
+#define pctl_m_ptevrfy 0x100000000000UL
+#define pctl_m_rpp 0x200000000000UL
+#define pctl_m_pid 0xC00000000000UL
+#define pctl_m_rsvd2 0xFFFF000000000000UL
+
+union TPchipPCTL {
+	struct {
+		unsigned pctl_v_fdsc : 1;
+		unsigned pctl_v_fbtb : 1;
+		unsigned pctl_v_thdis : 1;
+		unsigned pctl_v_chaindis : 1;
+		unsigned pctl_v_tgtlat : 1;
+		unsigned pctl_v_hole : 1;
+		unsigned pctl_v_mwin : 1;
+		unsigned pctl_v_arbena : 1;
+		unsigned pctl_v_prigrp : 7;
+		unsigned pctl_v_ppri : 1;
+		unsigned pctl_v_rsvd1 : 2;
+		unsigned pctl_v_eccen : 1;
+		unsigned pctl_v_padm : 1;
+		unsigned pctl_v_cdqmax : 4;
+		unsigned pctl_v_rev : 8;
+		unsigned pctl_v_crqmax : 4;
+		unsigned pctl_v_ptpmax : 4;
+		unsigned pctl_v_pclkx : 2;
+		unsigned pctl_v_fdsdis : 1;
+		unsigned pctl_v_fdwdis : 1;
+		unsigned pctl_v_ptevrfy : 1;
+		unsigned pctl_v_rpp : 1;
+		unsigned pctl_v_pid : 2;
+		unsigned pctl_v_rsvd2 : 16;
+	} pctl_r_bits;
+	int pctl_q_whole [2];
+};
+
+/*
+ * TSUNAMI Pchip Error Mask Register.
+ */
+#define perrmask_m_lost 0x1
+#define perrmask_m_serr 0x2
+#define perrmask_m_perr 0x4
+#define perrmask_m_dcrto 0x8
+#define perrmask_m_sge 0x10
+#define perrmask_m_ape 0x20
+#define perrmask_m_ta 0x40
+#define perrmask_m_rdpe 0x80
+#define perrmask_m_nds 0x100
+#define perrmask_m_rto 0x200
+#define perrmask_m_uecc 0x400
+#define perrmask_m_cre 0x800
+#define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL
+union TPchipPERRMASK {   
+	struct  {
+		unsigned int perrmask_v_lost : 1;
+		unsigned perrmask_v_serr : 1;
+		unsigned perrmask_v_perr : 1;
+		unsigned perrmask_v_dcrto : 1;
+		unsigned perrmask_v_sge : 1;
+		unsigned perrmask_v_ape : 1;
+		unsigned perrmask_v_ta : 1;
+		unsigned perrmask_v_rdpe : 1;
+		unsigned perrmask_v_nds : 1;
+		unsigned perrmask_v_rto : 1;
+		unsigned perrmask_v_uecc : 1;
+		unsigned perrmask_v_cre : 1;                 
+		unsigned perrmask_v_rsvd1 : 20;
+		unsigned perrmask_v_rsvd2 : 32;
+	} perrmask_r_bits;
+	int perrmask_q_whole [2];
+};                       
+
+/*
+ * Memory spaces:
+ */
+#define TSUNAMI_HOSE(h)		(((unsigned long)(h)) << 33)
+#define TSUNAMI_BASE		(IDENT_ADDR + TS_BIAS)
+
+#define TSUNAMI_MEM(h)		(TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL)
+#define _TSUNAMI_IACK_SC(h)	(TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL)
+#define TSUNAMI_IO(h)		(TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL)
+#define TSUNAMI_CONF(h)		(TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL)
+
+#define TSUNAMI_IACK_SC		_TSUNAMI_IACK_SC(0) /* hack! */
+
+
+/* 
+ * The canonical non-remapped I/O and MEM addresses have these values
+ * subtracted out.  This is arranged so that folks manipulating ISA
+ * devices can use their familiar numbers and have them map to bus 0.
+ */
+
+#define TSUNAMI_IO_BIAS          TSUNAMI_IO(0)
+#define TSUNAMI_MEM_BIAS         TSUNAMI_MEM(0)
+
+/* The IO address space is larger than 0xffff */
+#define TSUNAMI_IO_SPACE	(TSUNAMI_CONF(0) - TSUNAMI_IO(0))
+
+/* Offset between ram physical addresses and pci64 DAC bus addresses.  */
+#define TSUNAMI_DAC_OFFSET	(1UL << 40)
+
+/*
+ * Data structure for handling TSUNAMI machine checks:
+ */
+struct el_TSUNAMI_sysdata_mcheck {
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264),
+ * can only use linear accesses to get at PCI memory and I/O spaces.
+ */
+
+/*
+ * Memory functions.  All accesses are done through linear space.
+ */
+
+__EXTERN_INLINE void __iomem *tsunami_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + TSUNAMI_IO_BIAS);
+}
+
+__EXTERN_INLINE void __iomem *tsunami_ioremap(unsigned long addr, 
+					      unsigned long size)
+{
+	return (void __iomem *)(addr + TSUNAMI_MEM_BIAS);
+}
+
+__EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr)
+{
+	return addr >= TSUNAMI_BASE;
+}
+
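+/*
+ * Bit 32 splits each hose's window in two: PCI memory space sits at
+ * hose offset 0 (bit 32 clear), while the IACK, I/O and configuration
+ * regions all sit at offsets of 0x1F8000000 and above (bit 32 set).
+ */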
+__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	return (addr & 0x100000000UL) == 0;
+}
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		tsunami
+#define tsunami_trivial_rw_bw	1
+#define tsunami_trivial_rw_lq	1
+#define tsunami_trivial_io_bw	1
+#define tsunami_trivial_io_lq	1
+#define tsunami_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_TSUNAMI__H__ */
diff --git a/include/asm-alpha/core_wildfire.h b/include/asm-alpha/core_wildfire.h
new file mode 100644
index 0000000..12af803
--- /dev/null
+++ b/include/asm-alpha/core_wildfire.h
@@ -0,0 +1,318 @@
+#ifndef __ALPHA_WILDFIRE__H__
+#define __ALPHA_WILDFIRE__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+#define WILDFIRE_MAX_QBB	8	/* more than 8 requires other mods */
+#define WILDFIRE_PCA_PER_QBB	4
+#define WILDFIRE_IRQ_PER_PCA	64
+
+#define WILDFIRE_NR_IRQS \
+  (WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
+
+extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
+extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
+#define QBB_MAP_EMPTY	0xff
+
+extern unsigned long wildfire_hard_qbb_mask;
+extern unsigned long wildfire_soft_qbb_mask;
+extern unsigned long wildfire_gp_mask;
+extern unsigned long wildfire_hs_mask;
+extern unsigned long wildfire_iop_mask;
+extern unsigned long wildfire_ior_mask;
+extern unsigned long wildfire_pca_mask;
+extern unsigned long wildfire_cpu_mask;
+extern unsigned long wildfire_mem_mask;
+
+#define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno)))
+
+#define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2)))
+
+#define WILDFIRE_PCA_EXISTS(qbbno, pcano) \
+		(wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano))))
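+
+/*
+ * The masks above are encoded per QBB: wildfire_mem_mask carries four
+ * bits (one per memory module) for each QBB, and wildfire_pca_mask one
+ * bit per PCA at position (qbbno * 4) + pcano.
+ */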
+
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(64)));
+} wildfire_64;
+
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(256)));
+} wildfire_256;
+
+typedef struct {
+	volatile unsigned long csr __attribute__((aligned(2048)));
+} wildfire_2k;
+
+typedef struct {
+	wildfire_64	qsd_whami;
+	wildfire_64	qsd_rev;
+	wildfire_64	qsd_port_present;
+	wildfire_64	qsd_port_active;
+	wildfire_64	qsd_fault_ena;
+	wildfire_64	qsd_cpu_int_ena;
+	wildfire_64	qsd_mem_config;
+	wildfire_64	qsd_err_sum;
+	wildfire_64	ce_sum[4];
+	wildfire_64	dev_init[4];
+	wildfire_64	it_int[4];
+	wildfire_64	ip_int[4];
+	wildfire_64	uce_sum[4];
+	wildfire_64	se_sum__non_dev_int[4];
+	wildfire_64	scratch[4];
+	wildfire_64	qsd_timer;
+	wildfire_64	qsd_diag;
+} wildfire_qsd;
+
+typedef struct {
+	wildfire_256	qsd_whami;
+	wildfire_256	__pad1;
+	wildfire_256	ce_sum;
+	wildfire_256	dev_init;
+	wildfire_256	it_int;
+	wildfire_256	ip_int;
+	wildfire_256	uce_sum;
+	wildfire_256	se_sum;
+} wildfire_fast_qsd;
+
+typedef struct {
+	wildfire_2k	qsa_qbb_id;
+	wildfire_2k	__pad1;
+	wildfire_2k	qsa_port_ena;
+	wildfire_2k	qsa_scratch;
+	wildfire_2k	qsa_config[5];
+	wildfire_2k	qsa_ref_int;
+	wildfire_2k	qsa_qbb_pop[2];
+	wildfire_2k	qsa_dtag_fc;
+	wildfire_2k	__pad2[3];
+	wildfire_2k	qsa_diag;
+	wildfire_2k	qsa_diag_lock[4];
+	wildfire_2k	__pad3[11];
+	wildfire_2k	qsa_cpu_err_sum;
+	wildfire_2k	qsa_misc_err_sum;
+	wildfire_2k	qsa_tmo_err_sum;
+	wildfire_2k	qsa_err_ena;
+	wildfire_2k	qsa_tmo_config;
+	wildfire_2k	qsa_ill_cmd_err_sum;
+	wildfire_2k	__pad4[26];
+	wildfire_2k	qsa_busy_mask;
+	wildfire_2k	qsa_arr_valid;
+	wildfire_2k	__pad5[2];
+	wildfire_2k	qsa_port_map[4];
+	wildfire_2k	qsa_arr_addr[8];
+	wildfire_2k	qsa_arr_mask[8];
+} wildfire_qsa;
+
+typedef struct {
+	wildfire_64	ioa_config;
+	wildfire_64	iod_config;
+	wildfire_64	iop_switch_credits;
+	wildfire_64	__pad1;
+	wildfire_64	iop_hose_credits;
+	wildfire_64	__pad2[11];
+	struct {
+		wildfire_64	__pad3;
+		wildfire_64	init;
+	} iop_hose[4];
+	wildfire_64	ioa_hose_0_ctrl;
+	wildfire_64	iod_hose_0_ctrl;
+	wildfire_64	ioa_hose_1_ctrl;
+	wildfire_64	iod_hose_1_ctrl;
+	wildfire_64	ioa_hose_2_ctrl;
+	wildfire_64	iod_hose_2_ctrl;
+	wildfire_64	ioa_hose_3_ctrl;
+	wildfire_64	iod_hose_3_ctrl;
+	struct {
+		wildfire_64	target;
+		wildfire_64	__pad4;
+	} iop_dev_int[4];
+
+	wildfire_64	iop_err_int_target;
+	wildfire_64	__pad5[7];
+	wildfire_64	iop_qbb_err_sum;
+	wildfire_64	__pad6;
+	wildfire_64	iop_qbb_se_sum;
+	wildfire_64	__pad7;
+	wildfire_64	ioa_err_sum;
+	wildfire_64	iod_err_sum;
+	wildfire_64	__pad8[4];
+	wildfire_64	ioa_diag_force_err;
+	wildfire_64	iod_diag_force_err;
+	wildfire_64	__pad9[4];
+	wildfire_64	iop_diag_send_err_int;
+	wildfire_64	__pad10[15];
+	wildfire_64	ioa_scratch;
+	wildfire_64	iod_scratch;
+} wildfire_iop;
+
+typedef struct {
+	wildfire_2k	gpa_qbb_map[4];
+	wildfire_2k	gpa_mem_pop_map;
+	wildfire_2k	gpa_scratch;
+	wildfire_2k	gpa_diag;
+	wildfire_2k	gpa_config_0;
+	wildfire_2k	__pad1;
+	wildfire_2k	gpa_init_id;
+	wildfire_2k	gpa_config_2;
+	/* not complete */
+} wildfire_gp;
+
+typedef struct {
+	wildfire_64	pca_what_am_i;
+	wildfire_64	pca_err_sum;
+	wildfire_64	pca_diag_force_err;
+	wildfire_64	pca_diag_send_err_int;
+	wildfire_64	pca_hose_credits;
+	wildfire_64	pca_scratch;
+	wildfire_64	pca_micro_addr;
+	wildfire_64	pca_micro_data;
+	wildfire_64	pca_pend_int;
+	wildfire_64	pca_sent_int;
+	wildfire_64	__pad1;
+	wildfire_64	pca_stdio_edge_level;
+	wildfire_64	__pad2[52];
+	struct {
+		wildfire_64	target;
+		wildfire_64	enable;
+	} pca_int[4];
+	wildfire_64	__pad3[56];
+	wildfire_64	pca_alt_sent_int[32];
+} wildfire_pca;
+
+typedef struct {
+	wildfire_64	ne_what_am_i;
+	/* not complete */
+} wildfire_ne;
+
+typedef struct {
+	wildfire_64	fe_what_am_i;
+	/* not complete */
+} wildfire_fe;
+
+typedef struct {
+	wildfire_64	pci_io_addr_ext;
+	wildfire_64	pci_ctrl;
+	wildfire_64	pci_err_sum;
+	wildfire_64	pci_err_addr;
+	wildfire_64	pci_stall_cnt;
+	wildfire_64	pci_iack_special;
+	wildfire_64	__pad1[2];
+	wildfire_64	pci_pend_int;
+	wildfire_64	pci_sent_int;
+	wildfire_64	__pad2[54];
+	struct {
+		wildfire_64	wbase;
+		wildfire_64	wmask;
+		wildfire_64	tbase;
+	} pci_window[4];
+	wildfire_64	pci_flush_tlb;
+	wildfire_64	pci_perf_mon;
+} wildfire_pci;
+
+#define WILDFIRE_ENTITY_SHIFT		18
+
+#define WILDFIRE_GP_ENTITY		(0x10UL << WILDFIRE_ENTITY_SHIFT)
+#define WILDFIRE_IOP_ENTITY		(0x08UL << WILDFIRE_ENTITY_SHIFT)
+#define WILDFIRE_QSA_ENTITY		(0x04UL << WILDFIRE_ENTITY_SHIFT)
+#define WILDFIRE_QSD_ENTITY_SLOW	(0x05UL << WILDFIRE_ENTITY_SHIFT)
+#define WILDFIRE_QSD_ENTITY_FAST	(0x01UL << WILDFIRE_ENTITY_SHIFT)
+
+#define WILDFIRE_PCA_ENTITY(pca)	((0xc|(pca))<<WILDFIRE_ENTITY_SHIFT)
+
+#define WILDFIRE_BASE		(IDENT_ADDR | (1UL << 40))
+
+#define WILDFIRE_QBB_MASK	0x0fUL	/* for now, only 4 bits/16 QBBs */
+
+#define WILDFIRE_QBB(q)		((~((long)(q)) & WILDFIRE_QBB_MASK) << 36)
+#define WILDFIRE_HOSE(h)	((long)(h) << 33)
+
+#define WILDFIRE_QBB_IO(q)	(WILDFIRE_BASE | WILDFIRE_QBB(q))
+#define WILDFIRE_QBB_HOSE(q,h)	(WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h))
+
+#define WILDFIRE_MEM(q,h)	(WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL)
+#define WILDFIRE_CONF(q,h)	(WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL)
+#define WILDFIRE_IO(q,h)	(WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL)
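+
+/*
+ * Note that WILDFIRE_QBB() stores the QBB number complemented, so QBB 0
+ * maps highest: WILDFIRE_QBB(0) = 0xfUL << 36.  Each hose then selects
+ * an 8GB sub-window via bits 33 and up.
+ */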
+
+#define WILDFIRE_qsd(q) \
+ ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_fast_qsd() \
+ ((wildfire_fast_qsd *)(WILDFIRE_QBB_IO(0)|WILDFIRE_QSD_ENTITY_FAST|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_qsa(q) \
+ ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_iop(q) \
+ ((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_gp(q) \
+ ((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_pca(q,pca) \
+ ((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_ne(q,pca) \
+ ((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16)))
+
+#define WILDFIRE_fe(q,pca) \
+ ((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15)))
+
+#define WILDFIRE_pci(q,h) \
+ ((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23)))
+
+#define WILDFIRE_IO_BIAS        WILDFIRE_IO(0,0)
+#define WILDFIRE_MEM_BIAS       WILDFIRE_MEM(0,0) /* ??? */
+
+/* The IO address space is larger than 0xffff */
+#define WILDFIRE_IO_SPACE	(8UL*1024*1024)
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * Memory functions.  All accesses are done through linear space.
+ */
+
+__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)(addr + WILDFIRE_IO_BIAS);
+}
+
+__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr, 
+					       unsigned long size)
+{
+	return (void __iomem *)(addr + WILDFIRE_MEM_BIAS);
+}
+
+__EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr)
+{
+	return addr >= WILDFIRE_BASE;
+}
+
+__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long)xaddr;
+	return (addr & 0x100000000UL) == 0;
+}
+
+#undef __IO_PREFIX
+#define __IO_PREFIX			wildfire
+#define wildfire_trivial_rw_bw		1
+#define wildfire_trivial_rw_lq		1
+#define wildfire_trivial_io_bw		1
+#define wildfire_trivial_io_lq		1
+#define wildfire_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_WILDFIRE__H__ */
diff --git a/include/asm-alpha/cputime.h b/include/asm-alpha/cputime.h
new file mode 100644
index 0000000..19577fd
--- /dev/null
+++ b/include/asm-alpha/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_CPUTIME_H
+#define __ALPHA_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __ALPHA_CPUTIME_H */
diff --git a/include/asm-alpha/current.h b/include/asm-alpha/current.h
new file mode 100644
index 0000000..8d88a13
--- /dev/null
+++ b/include/asm-alpha/current.h
@@ -0,0 +1,9 @@
+#ifndef _ALPHA_CURRENT_H
+#define _ALPHA_CURRENT_H
+
+#include <linux/thread_info.h>
+
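+/*
+ * The "+ 0" turns get_current() into a non-lvalue, so "current" cannot
+ * be assigned to by accident.
+ */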
+#define get_current()	(current_thread_info()->task + 0)
+#define current		get_current()
+
+#endif /* _ALPHA_CURRENT_H */
diff --git a/include/asm-alpha/delay.h b/include/asm-alpha/delay.h
new file mode 100644
index 0000000..2aa3f41
--- /dev/null
+++ b/include/asm-alpha/delay.h
@@ -0,0 +1,10 @@
+#ifndef __ALPHA_DELAY_H
+#define __ALPHA_DELAY_H
+
+extern void __delay(int loops);
+extern void udelay(unsigned long usecs);
+
+extern void ndelay(unsigned long nsecs);
+#define ndelay ndelay
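+
+/* The self-referential define above lets generic code detect (via
+   #ifndef ndelay) that the architecture supplies its own ndelay().  */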
+
+#endif /* defined(__ALPHA_DELAY_H) */
diff --git a/include/asm-alpha/div64.h b/include/asm-alpha/div64.h
new file mode 100644
index 0000000..6cd978c
--- /dev/null
+++ b/include/asm-alpha/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
new file mode 100644
index 0000000..c675f28
--- /dev/null
+++ b/include/asm-alpha/dma-mapping.h
@@ -0,0 +1,67 @@
+#ifndef _ALPHA_DMA_MAPPING_H
+#define _ALPHA_DMA_MAPPING_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+
+#define dma_map_single(dev, va, size, dir)		\
+		pci_map_single(alpha_gendev_to_pci(dev), va, size, dir)
+#define dma_unmap_single(dev, addr, size, dir)		\
+		pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir)
+#define dma_alloc_coherent(dev, size, addr, gfp)	\
+		pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr)
+#define dma_free_coherent(dev, size, va, addr)		\
+		pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
+#define dma_map_page(dev, page, off, size, dir)		\
+		pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir)
+#define dma_unmap_page(dev, addr, size, dir)		\
+		pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
+#define dma_map_sg(dev, sg, nents, dir)			\
+		pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
+#define dma_unmap_sg(dev, sg, nents, dir)		\
+		pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
+#define dma_supported(dev, mask)			\
+		pci_dma_supported(alpha_gendev_to_pci(dev), mask)
+#define dma_mapping_error(addr)				\
+		pci_dma_mapping_error(addr)
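+
+/*
+ * With PCI configured, the generic DMA API is routed straight to the
+ * PCI DMA layer by translating the generic struct device back to its
+ * pci_dev with alpha_gendev_to_pci().
+ */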
+
+#else	/* no PCI - no IOMMU. */
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int gfp);
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	       enum dma_data_direction direction);
+
+#define dma_free_coherent(dev, size, va, addr)		\
+		free_pages((unsigned long)va, get_order(size))
+#define dma_supported(dev, mask)		(mask < 0x00ffffffUL ? 0 : 1)
+#define dma_map_single(dev, va, size, dir)	virt_to_phys(va)
+#define dma_map_page(dev, page, off, size, dir)	(page_to_pa(page) + off)
+
+#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
+#define dma_unmap_page(dev, addr, size, dir)	do { } while (0)
+#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
+
+#define dma_mapping_error(addr)  (0)
+
+#endif	/* !CONFIG_PCI */
+
+#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(dev)			(1)
+
+int dma_set_mask(struct device *dev, u64 mask);
+
+#define dma_sync_single_for_cpu(dev, addr, size, dir)	  do { } while (0)
+#define dma_sync_single_for_device(dev, addr, size, dir)  do { } while (0)
+#define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
+#define dma_sync_sg_for_cpu(dev, sg, nents, dir)	  do { } while (0)
+#define dma_sync_sg_for_device(dev, sg, nents, dir)	  do { } while (0)
+#define dma_cache_sync(va, size, dir)			  do { } while (0)
+
+#define dma_get_cache_alignment()			  L1_CACHE_BYTES
+
+#endif	/* _ALPHA_DMA_MAPPING_H */
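
With CONFIG_PCI this header simply forwards the generic DMA API onto the PCI
DMA API via alpha_gendev_to_pci(). A minimal sketch of the call pattern a
driver would use, assuming a valid "struct device *dev"; BUF_SIZE and the
error label are hypothetical, and the direction constants come from
<linux/dma-mapping.h>:

	void *buf = kmalloc(BUF_SIZE, GFP_KERNEL);
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, BUF_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		goto out_free;		/* hypothetical error path */
	/* ... hand "handle" to the hardware, run the transfer ... */
	dma_unmap_single(dev, handle, BUF_SIZE, DMA_TO_DEVICE);
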
diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h
new file mode 100644
index 0000000..683afaa
--- /dev/null
+++ b/include/asm-alpha/dma.h
@@ -0,0 +1,377 @@
+/*
+ * include/asm-alpha/dma.h
+ *
+ * This is essentially the same as the i386 DMA stuff, as the AlphaPCs
+ * use ISA-compatible dma.  The only extension is support for high-page
+ * registers that allow setting the top 8 bits of a 32-bit DMA address.
+ * This register should be written last when setting up a DMA address
+ * as this will also enable DMA across 64 KB boundaries.
+ */
+
+/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+
+#define dma_outb	outb
+#define dma_inb		inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ *  controller 1: channels 0-3, byte operations, ports 00-1F
+ *  controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ *  - ALL registers are 8 bits only, regardless of transfer size
+ *  - channel 4 is not used - cascades 1 into 2.
+ *  - channels 0-3 are byte - addresses/counts are for physical bytes
+ *  - channels 5-7 are word - addresses/counts are for physical words
+ *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ *  - transfer count loaded to registers is 1 less than actual count
+ *  - controller 2 offsets are all even (2x offsets for controller 1)
+ *  - page registers for 5-7 don't use data bit 0, represent 128K pages
+ *  - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.  
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ *  Address mapping for channels 0-3:
+ *
+ *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *   P7  ...  P0  A7 ... A0  A7 ... A0   
+ * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
+ *
+ *  Address mapping for channels 5-7:
+ *
+ *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
+ *    |  ...  |   \   \   ... \  \  \  ... \  \
+ *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
+ *    |  ...  |     \   \   ... \  \  \  ... \
+ *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0   
+ * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
+ *
+ */
+
+#define MAX_DMA_CHANNELS	8
+
+/*
+  ISA DMA limitations on Alpha platforms.
+
+  These may be due to an SIO (PCI<->ISA bridge) chipset limitation,
+  or just a wiring limit.
+*/
+
+/* The maximum address for ISA DMA transfer on Alpha XL, due to a
+   hardware SIO limitation, is 64MB.
+*/
+#define ALPHA_XL_MAX_ISA_DMA_ADDRESS		0x04000000UL
+
+/* The maximum address for ISA DMA transfer on RUFFIAN,
+   due to a hardware SIO limitation, is 16MB.
+*/
+#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS	0x01000000UL
+
+/* The maximum address for ISA DMA transfer on SABLE, and some ALCORs,
+   due to a hardware SIO chip limitation, is 2GB.
+*/
+#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS		0x80000000UL
+#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS		0x80000000UL
+
+/*
+  Maximum address for all the others is the complete 32-bit bus
+  address space.
+*/
+#define ALPHA_MAX_ISA_DMA_ADDRESS		0x100000000UL
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define MAX_ISA_DMA_ADDRESS		(alpha_mv.max_isa_dma_address)
+#else
+# if defined(CONFIG_ALPHA_XL)
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_XL_MAX_ISA_DMA_ADDRESS
+# elif defined(CONFIG_ALPHA_RUFFIAN)
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
+# elif defined(CONFIG_ALPHA_SABLE)
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
+# elif defined(CONFIG_ALPHA_ALCOR)
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS
+# else
+#  define MAX_ISA_DMA_ADDRESS		ALPHA_MAX_ISA_DMA_ADDRESS
+# endif
+#endif
+
+/* If we have the iommu, we don't have any address limitations on DMA.
+   Otherwise (Nautilus, RX164), we have to have a 0-16 MB DMA zone
+   like i386. */
+#define MAX_DMA_ADDRESS		(alpha_mv.mv_pci_tbi ?	\
+				 ~0UL : IDENT_ADDR + 0x01000000)
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG		0x08	/* command register (w) */
+#define DMA1_STAT_REG		0x08	/* status register (r) */
+#define DMA1_REQ_REG            0x09    /* request register (w) */
+#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
+#define DMA1_MODE_REG		0x0B	/* mode register (w) */
+#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
+#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
+#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
+#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
+#define DMA1_EXT_MODE_REG	(0x400 | DMA1_MODE_REG)
+
+#define DMA2_CMD_REG		0xD0	/* command register (w) */
+#define DMA2_STAT_REG		0xD0	/* status register (r) */
+#define DMA2_REQ_REG            0xD2    /* request register (w) */
+#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
+#define DMA2_MODE_REG		0xD6	/* mode register (w) */
+#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
+#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
+#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
+#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
+#define DMA2_EXT_MODE_REG	(0x400 | DMA2_MODE_REG)
+
+#define DMA_ADDR_0              0x00    /* DMA address registers */
+#define DMA_ADDR_1              0x02
+#define DMA_ADDR_2              0x04
+#define DMA_ADDR_3              0x06
+#define DMA_ADDR_4              0xC0
+#define DMA_ADDR_5              0xC4
+#define DMA_ADDR_6              0xC8
+#define DMA_ADDR_7              0xCC
+
+#define DMA_CNT_0               0x01    /* DMA count registers */
+#define DMA_CNT_1               0x03
+#define DMA_CNT_2               0x05
+#define DMA_CNT_3               0x07
+#define DMA_CNT_4               0xC2
+#define DMA_CNT_5               0xC6
+#define DMA_CNT_6               0xCA
+#define DMA_CNT_7               0xCE
+
+#define DMA_PAGE_0              0x87    /* DMA page registers */
+#define DMA_PAGE_1              0x83
+#define DMA_PAGE_2              0x81
+#define DMA_PAGE_3              0x82
+#define DMA_PAGE_5              0x8B
+#define DMA_PAGE_6              0x89
+#define DMA_PAGE_7              0x8A
+
+#define DMA_HIPAGE_0		(0x400 | DMA_PAGE_0)
+#define DMA_HIPAGE_1		(0x400 | DMA_PAGE_1)
+#define DMA_HIPAGE_2		(0x400 | DMA_PAGE_2)
+#define DMA_HIPAGE_3		(0x400 | DMA_PAGE_3)
+#define DMA_HIPAGE_4		(0x400 | DMA_PAGE_4)
+#define DMA_HIPAGE_5		(0x400 | DMA_PAGE_5)
+#define DMA_HIPAGE_6		(0x400 | DMA_PAGE_6)
+#define DMA_HIPAGE_7		(0x400 | DMA_PAGE_7)
+
+#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT	0x10
+
+extern spinlock_t  dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dma_spin_lock, flags);
+	return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(dmanr,  DMA1_MASK_REG);
+	else
+		dma_outb(dmanr & 3,  DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(dmanr | 4,  DMA1_MASK_REG);
+	else
+		dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(0,  DMA1_CLEAR_FF_REG);
+	else
+		dma_outb(0,  DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+	if (dmanr<=3)
+		dma_outb(mode | dmanr,  DMA1_MODE_REG);
+	else
+		dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
+}
+
+/* set extended mode for a specific DMA channel */
+static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode)
+{
+	if (dmanr<=3)
+		dma_outb(ext_mode | dmanr,  DMA1_EXT_MODE_REG);
+	else
+		dma_outb(ext_mode | (dmanr&3),  DMA2_EXT_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr)
+{
+	switch(dmanr) {
+		case 0:
+			dma_outb(pagenr, DMA_PAGE_0);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_0);
+			break;
+		case 1:
+			dma_outb(pagenr, DMA_PAGE_1);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_1);
+			break;
+		case 2:
+			dma_outb(pagenr, DMA_PAGE_2);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_2);
+			break;
+		case 3:
+			dma_outb(pagenr, DMA_PAGE_3);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_3);
+			break;
+		case 5:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_5);
+			break;
+		case 6:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_6);
+			break;
+		case 7:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+			dma_outb((pagenr >> 8), DMA_HIPAGE_7);
+			break;
+	}
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	if (dmanr <= 3)  {
+	    dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+            dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+	}  else  {
+	    dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+	    dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+	}
+	set_dma_page(dmanr, a>>16);	/* set hipage last to enable 32-bit mode */
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+        count--;
+	if (dmanr <= 3)  {
+	    dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+	    dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+        } else {
+	    dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+	    dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+        }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+	/* using short to get 16-bit wrap around */
+	unsigned short count;
+
+	count = 1 + dma_inb(io_port);
+	count += dma_inb(io_port) << 8;
+	
+	return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr);	/* release it again */
+#define KERNEL_HAVE_CHECK_DMA
+extern int check_dma(unsigned int dmanr);
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy 	(0)
+#endif
+
+
+#endif /* _ASM_DMA_H */
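
The helpers above combine into the canonical 8237 programming sequence: take
the lock, clear the flip-flop, then program mode, address and count before
enabling the channel. A sketch, assuming "channel" was reserved with
request_dma() and "bus_addr"/"len" respect the boundary and
MAX_ISA_DMA_ADDRESS rules documented above:

	unsigned long flags = claim_dma_lock();

	disable_dma(channel);
	clear_dma_ff(channel);			/* known flip-flop state */
	set_dma_mode(channel, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(channel, bus_addr);	/* writes the hipage reg last */
	set_dma_count(channel, len);		/* bytes, even for ch. 5-7 */
	enable_dma(channel);

	release_dma_lock(flags);
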
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
new file mode 100644
index 0000000..e94a945
--- /dev/null
+++ b/include/asm-alpha/elf.h
@@ -0,0 +1,185 @@
+#ifndef __ASM_ALPHA_ELF_H
+#define __ASM_ALPHA_ELF_H
+
+/* Special values for the st_other field in the symbol table.  */
+
+#define STO_ALPHA_NOPV		0x80
+#define STO_ALPHA_STD_GPLOAD	0x88
+
+/*
+ * Alpha ELF relocation types
+ */
+#define R_ALPHA_NONE            0       /* No reloc */
+#define R_ALPHA_REFLONG         1       /* Direct 32 bit */
+#define R_ALPHA_REFQUAD         2       /* Direct 64 bit */
+#define R_ALPHA_GPREL32         3       /* GP relative 32 bit */
+#define R_ALPHA_LITERAL         4       /* GP relative 16 bit w/optimization */
+#define R_ALPHA_LITUSE          5       /* Optimization hint for LITERAL */
+#define R_ALPHA_GPDISP          6       /* Add displacement to GP */
+#define R_ALPHA_BRADDR          7       /* PC+4 relative 23 bit shifted */
+#define R_ALPHA_HINT            8       /* PC+4 relative 16 bit shifted */
+#define R_ALPHA_SREL16          9       /* PC relative 16 bit */
+#define R_ALPHA_SREL32          10      /* PC relative 32 bit */
+#define R_ALPHA_SREL64          11      /* PC relative 64 bit */
+#define R_ALPHA_GPRELHIGH       17      /* GP relative 32 bit, high 16 bits */
+#define R_ALPHA_GPRELLOW        18      /* GP relative 32 bit, low 16 bits */
+#define R_ALPHA_GPREL16         19      /* GP relative 16 bit */
+#define R_ALPHA_COPY            24      /* Copy symbol at runtime */
+#define R_ALPHA_GLOB_DAT        25      /* Create GOT entry */
+#define R_ALPHA_JMP_SLOT        26      /* Create PLT entry */
+#define R_ALPHA_RELATIVE        27      /* Adjust by program base */
+#define R_ALPHA_BRSGP		28
+#define R_ALPHA_TLSGD           29
+#define R_ALPHA_TLS_LDM         30
+#define R_ALPHA_DTPMOD64        31
+#define R_ALPHA_GOTDTPREL       32
+#define R_ALPHA_DTPREL64        33
+#define R_ALPHA_DTPRELHI        34
+#define R_ALPHA_DTPRELLO        35
+#define R_ALPHA_DTPREL16        36
+#define R_ALPHA_GOTTPREL        37
+#define R_ALPHA_TPREL64         38
+#define R_ALPHA_TPRELHI         39
+#define R_ALPHA_TPRELLO         40
+#define R_ALPHA_TPREL16         41
+
+#define SHF_ALPHA_GPREL		0x10000000
+
+/* Legal values for e_flags field of Elf64_Ehdr.  */
+
+#define EF_ALPHA_32BIT		1	/* All addresses are below 2GB */
+
+/*
+ * ELF register definitions..
+ */
+
+/*
+ * The OSF/1 version of <sys/procfs.h> makes gregset_t 46 entries long.
+ * I have no idea why that is so.  For now, we just leave it at 33
+ * (32 general regs + processor status word). 
+ */
+#define ELF_NGREG	33
+#define ELF_NFPREG	32
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS64
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_ALPHA
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	8192
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
+
+/* $0 is set by ld.so to a pointer to a function which might be 
+   registered using atexit.  This provides a means for the dynamic
+   linker to call DT_FINI functions for shared libraries that have
+   been loaded before the code runs.
+
+   So that we can use the same startup file with static executables,
+   we start programs with a value of 0 to indicate that there is no
+   such function.  */
+
+#define ELF_PLAT_INIT(_r, load_addr)	_r->r0 = 0
+
+/* The registers are laid out in pt_regs for PAL and syscall
+   convenience.  Re-order them for the linear elf_gregset_t.  */
+
+struct pt_regs;
+struct thread_info;
+struct task_struct;
+extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt,
+			    struct thread_info *ti);
+#define ELF_CORE_COPY_REGS(DEST, REGS) \
+	dump_elf_thread(DEST, REGS, current_thread_info());
+
+/* Similar, but for a thread other than current.  */
+
+extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
+#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \
+	dump_elf_task(*(DEST), TASK)
+
+/* Similar, but for the FP registers.  */
+
+extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
+#define ELF_CORE_COPY_FPREGS(TASK, DEST) \
+	dump_elf_task_fp(*(DEST), TASK)
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports.  This is trivial on Alpha, 
+   but not so on other machines. */
+
+#define ELF_HWCAP  (~amask(-1))
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.  */
+
+#define ELF_PLATFORM				\
+({						\
+	enum implver_enum i_ = implver();	\
+	( i_ == IMPLVER_EV4 ? "ev4"		\
+	: i_ == IMPLVER_EV5			\
+	  ? (amask(AMASK_BWX) ? "ev5" : "ev56")	\
+	: amask (AMASK_CIX) ? "ev6" : "ev67");	\
+})
+
+/* Reserve these numbers for any future use of a VDSO.  */
+#if 0
+#define AT_SYSINFO		32
+#define AT_SYSINFO_EHDR		33
+#endif
+
+/* More complete cache descriptions than AT_[DIU]CACHEBSIZE.  If the
+   value is -1, then the cache doesn't exist.  Otherwise:
+
+      bit 0-3:	  Cache set-associativity; 0 means fully associative.
+      bit 4-7:	  Log2 of cacheline size.
+      bit 8-31:	  Size of the entire cache >> 8.
+      bit 32-63:  Reserved.
+*/
+
+#define AT_L1I_CACHESHAPE	34
+#define AT_L1D_CACHESHAPE	35
+#define AT_L2_CACHESHAPE	36
+#define AT_L3_CACHESHAPE	37
+
+#ifdef __KERNEL__
+
+#define SET_PERSONALITY(EX, IBCS2)				\
+	set_personality(((EX).e_flags & EF_ALPHA_32BIT)		\
+	   ? PER_LINUX_32BIT : (IBCS2) ? PER_SVR4 : PER_LINUX)
+
+extern int alpha_l1i_cacheshape;
+extern int alpha_l1d_cacheshape;
+extern int alpha_l2_cacheshape;
+extern int alpha_l3_cacheshape;
+
+#define ARCH_DLINFO						\
+  do {								\
+    NEW_AUX_ENT(AT_L1I_CACHESHAPE, alpha_l1i_cacheshape);	\
+    NEW_AUX_ENT(AT_L1D_CACHESHAPE, alpha_l1d_cacheshape);	\
+    NEW_AUX_ENT(AT_L2_CACHESHAPE, alpha_l2_cacheshape);		\
+    NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape);		\
+  } while (0)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_ALPHA_ELF_H */
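
The cacheshape encoding documented above unpacks mechanically. A hypothetical
decoder, only to make the bit layout concrete:

	static void decode_cacheshape(long val)
	{
		unsigned int assoc, line;
		unsigned long size;

		if (val == -1)
			return;			/* cache doesn't exist */
		assoc = val & 0xf;		/* 0 = fully associative */
		line = 1U << ((val >> 4) & 0xf);
		size = (((unsigned long)val >> 8) & 0xffffff) << 8;
		printk("assoc %u, %u-byte lines, %lu bytes total\n",
		       assoc, line, size);
	}
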
diff --git a/include/asm-alpha/err_common.h b/include/asm-alpha/err_common.h
new file mode 100644
index 0000000..c250959
--- /dev/null
+++ b/include/asm-alpha/err_common.h
@@ -0,0 +1,118 @@
+/*
+ *	linux/include/asm-alpha/err_common.h
+ *
+ *	Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ *	Contains declarations and macros to support Alpha error handling
+ * 	implementations.
+ */
+
+#ifndef __ALPHA_ERR_COMMON_H
+#define __ALPHA_ERR_COMMON_H 1
+
+/*
+ * SCB Vector definitions
+ */
+#define SCB_Q_SYSERR	0x620
+#define SCB_Q_PROCERR	0x630
+#define SCB_Q_SYSMCHK	0x660
+#define SCB_Q_PROCMCHK	0x670
+#define SCB_Q_SYSEVENT	0x680
+
+/*
+ * Disposition definitions for logout frame parser
+ */
+#define MCHK_DISPOSITION_UNKNOWN_ERROR		0x00
+#define MCHK_DISPOSITION_REPORT			0x01
+#define MCHK_DISPOSITION_DISMISS		0x02
+
+/*
+ * Error Log definitions
+ */
+/*
+ * Types
+ */
+
+#define EL_CLASS__TERMINATION		(0)
+#  define EL_TYPE__TERMINATION__TERMINATION		(0)
+#define EL_CLASS__HEADER		(5)
+#  define EL_TYPE__HEADER__SYSTEM_ERROR_FRAME		(1)
+#  define EL_TYPE__HEADER__SYSTEM_EVENT_FRAME		(2)
+#  define EL_TYPE__HEADER__HALT_FRAME			(3)
+#  define EL_TYPE__HEADER__LOGOUT_FRAME			(19)
+#define EL_CLASS__GENERAL_NOTIFICATION	(9)
+#define EL_CLASS__PCI_ERROR_FRAME	(11)
+#define EL_CLASS__REGATTA_FAMILY	(12)
+#  define EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME	(1)
+#  define EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME		(2)
+#  define EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME		(3)
+#  define EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED	(8)
+#  define EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED	(9)
+#  define EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED	(10)
+#  define EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT	(11)
+#  define EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT	(12)
+#define EL_CLASS__PAL                   (14)
+#  define EL_TYPE__PAL__LOGOUT_FRAME                    (1)
+#  define EL_TYPE__PAL__EV7_PROCESSOR			(4)
+#  define EL_TYPE__PAL__EV7_ZBOX			(5)
+#  define EL_TYPE__PAL__EV7_RBOX			(6)
+#  define EL_TYPE__PAL__EV7_IO				(7)
+#  define EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE	(10)
+#  define EL_TYPE__PAL__ENV__AIRMOVER_FAN		(11)
+#  define EL_TYPE__PAL__ENV__VOLTAGE			(12)
+#  define EL_TYPE__PAL__ENV__INTRUSION			(13)
+#  define EL_TYPE__PAL__ENV__POWER_SUPPLY		(14)
+#  define EL_TYPE__PAL__ENV__LAN			(15)
+#  define EL_TYPE__PAL__ENV__HOT_PLUG			(16)
+
+union el_timestamp {
+	struct {
+		u8 second;
+		u8 minute;
+		u8 hour;
+		u8 day;
+		u8 month;
+		u8 year;
+	} b;
+	u64 as_int;
+};
+
+struct el_subpacket {
+	u16 length;		/* length of header (in bytes)	*/
+	u16 class;		/* header class and type...   	*/
+	u16 type;		/* ...determine content     	*/
+	u16 revision;		/* header revision 		*/
+	union {
+		struct {	/* Class 5, Type 1 - System Error	*/
+			u32 frame_length;
+			u32 frame_packet_count;			
+		} sys_err;			
+		struct {	/* Class 5, Type 2 - System Event 	*/
+			union el_timestamp timestamp;
+			u32 frame_length;
+			u32 frame_packet_count;			
+		} sys_event;
+		struct {	/* Class 5, Type 3 - Double Error Halt	*/
+			u16 halt_code;
+			u16 reserved;
+			union el_timestamp timestamp;
+			u32 frame_length;
+			u32 frame_packet_count;
+		} err_halt;
+		struct {	/* Class 5, Type 19 - Logout Frame Header */
+			u32 frame_length;
+			u32 frame_flags;
+			u32 cpu_offset;	
+			u32 system_offset;
+		} logout_header;
+		struct {	/* Class 12 - Regatta			*/
+			u64 cpuid;
+			u64 data_start[1];
+		} regatta_frame;
+		struct {	/* Raw 				        */
+			u64 data_start[1];
+		} raw;
+	} by_type;
+};
+
+#endif /* __ALPHA_ERR_COMMON_H */
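
Subpackets are self-describing: "length" gives the size in bytes while
class/type select the union member, so a parser advances by length and
dispatches on class. A sketch, with "count" a hypothetical packet count taken
from the enclosing frame:

	static void walk_subpackets(struct el_subpacket *p, int count)
	{
		while (count-- > 0) {
			switch (p->class) {
			case EL_CLASS__TERMINATION:
				return;		/* end of the chain */
			case EL_CLASS__HEADER:
				/* p->type picks sys_err/sys_event/... */
				break;
			default:
				break;
			}
			p = (struct el_subpacket *)((char *)p + p->length);
		}
	}
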
diff --git a/include/asm-alpha/err_ev6.h b/include/asm-alpha/err_ev6.h
new file mode 100644
index 0000000..ea63779
--- /dev/null
+++ b/include/asm-alpha/err_ev6.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_ERR_EV6_H
+#define __ALPHA_ERR_EV6_H 1
+
+/* Dummy include for now. */
+
+#endif /* __ALPHA_ERR_EV6_H */
diff --git a/include/asm-alpha/err_ev7.h b/include/asm-alpha/err_ev7.h
new file mode 100644
index 0000000..87f9977
--- /dev/null
+++ b/include/asm-alpha/err_ev7.h
@@ -0,0 +1,202 @@
+#ifndef __ALPHA_ERR_EV7_H
+#define __ALPHA_ERR_EV7_H 1
+
+/*
+ * Data for el packet class PAL (14), type LOGOUT_FRAME (1)
+ */
+struct ev7_pal_logout_subpacket {
+	u32 mchk_code;
+	u32 subpacket_count;
+	u64 whami;
+	u64 rbox_whami;
+	u64 rbox_int;
+	u64 exc_addr;
+	union el_timestamp timestamp;
+	u64 halt_code;
+	u64 reserved;
+};
+
+/*
+ * Data for el packet class PAL (14), type EV7_PROCESSOR (4)
+ */
+struct ev7_pal_processor_subpacket {
+	u64 i_stat;
+	u64 dc_stat;
+	u64 c_addr;
+	u64 c_syndrome_1;
+	u64 c_syndrome_0;
+	u64 c_stat;
+	u64 c_sts;
+	u64 mm_stat;
+	u64 exc_addr;
+	u64 ier_cm;
+	u64 isum;
+	u64 pal_base;
+	u64 i_ctl;
+	u64 process_context;
+	u64 cbox_ctl;
+	u64 cbox_stp_ctl;
+	u64 cbox_acc_ctl;
+	u64 cbox_lcl_set;
+	u64 cbox_gbl_set;
+	u64 bbox_ctl;
+	u64 bbox_err_sts;
+	u64 bbox_err_idx;
+	u64 cbox_ddp_err_sts;
+	u64 bbox_dat_rmp;
+	u64 reserved[2];
+};
+
+/*
+ * Data for el packet class PAL (14), type EV7_ZBOX (5)
+ */
+struct ev7_pal_zbox_subpacket {
+	u32 zbox0_dram_err_status_1;
+	u32 zbox0_dram_err_status_2;
+	u32 zbox0_dram_err_status_3;
+	u32 zbox0_dram_err_ctl;
+	u32 zbox0_dram_err_adr;
+	u32 zbox0_dift_timeout;
+	u32 zbox0_dram_mapper_ctl;
+	u32 zbox0_frc_err_adr;
+	u32 zbox0_dift_err_status;
+	u32 reserved1;
+	u32 zbox1_dram_err_status_1;
+	u32 zbox1_dram_err_status_2;
+	u32 zbox1_dram_err_status_3;
+	u32 zbox1_dram_err_ctl;
+	u32 zbox1_dram_err_adr;
+	u32 zbox1_dift_timeout;
+	u32 zbox1_dram_mapper_ctl;
+	u32 zbox1_frc_err_adr;
+	u32 zbox1_dift_err_status;
+	u32 reserved2;
+	u64 cbox_ctl;
+	u64 cbox_stp_ctl;
+	u64 zbox0_error_pa;
+	u64 zbox1_error_pa;
+	u64 zbox0_ored_syndrome;
+	u64 zbox1_ored_syndrome;
+	u64 reserved3[2];
+};
+
+/*
+ * Data for el packet class PAL (14), type EV7_RBOX (6)
+ */
+struct ev7_pal_rbox_subpacket {
+	u64 rbox_cfg;
+	u64 rbox_n_cfg;
+	u64 rbox_s_cfg;
+	u64 rbox_e_cfg;
+	u64 rbox_w_cfg;
+	u64 rbox_n_err;
+	u64 rbox_s_err;
+	u64 rbox_e_err;
+	u64 rbox_w_err;
+	u64 rbox_io_cfg;
+	u64 rbox_io_err;
+	u64 rbox_l_err;
+	u64 rbox_whoami;
+	u64 rbox_imask;
+	u64 rbox_intq;
+	u64 rbox_int;
+	u64 reserved[2];
+};
+
+/*
+ * Data for el packet class PAL (14), type EV7_IO (7)
+ */
+struct ev7_pal_io_one_port {
+	u64 pox_err_sum;
+	u64 pox_tlb_err;
+	u64 pox_spl_cmplt;
+	u64 pox_trans_sum;
+	u64 pox_first_err;
+	u64 pox_mult_err;
+	u64 pox_dm_source;
+	u64 pox_dm_dest;
+	u64 pox_dm_size;
+	u64 pox_dm_ctrl;
+	u64 reserved;
+};
+
+struct ev7_pal_io_subpacket {
+	u64 io_asic_rev;
+	u64 io_sys_rev;
+	u64 io7_uph;
+	u64 hpi_ctl;
+	u64 crd_ctl;
+	u64 hei_ctl;
+	u64 po7_error_sum;
+	u64 po7_uncrr_sym;
+	u64 po7_crrct_sym;
+	u64 po7_ugbge_sym;
+	u64 po7_err_pkt0;
+	u64 po7_err_pkt1;
+	u64 reserved[2];
+	struct ev7_pal_io_one_port ports[4];
+};
+
+/*
+ * Environmental subpacket. Data used for el packets:
+ * 	   class PAL (14), type AMBIENT_TEMPERATURE (10)
+ * 	   class PAL (14), type AIRMOVER_FAN (11)
+ * 	   class PAL (14), type VOLTAGE (12)
+ * 	   class PAL (14), type INTRUSION (13)
+ *	   class PAL (14), type POWER_SUPPLY (14)
+ *	   class PAL (14), type LAN (15)
+ *	   class PAL (14), type HOT_PLUG (16)
+ */
+struct ev7_pal_environmental_subpacket {
+	u16 cabinet;
+	u16 drawer;
+	u16 reserved1[2];
+	u8 module_type;
+	u8 unit_id;		/* unit reporting condition */
+	u8 reserved2;
+	u8 condition;		/* condition reported       */
+};
+
+/*
+ * Convert environmental type to index
+ */
+static inline int ev7_lf_env_index(int type)
+{
+	BUG_ON((type < EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE) 
+	       || (type > EL_TYPE__PAL__ENV__HOT_PLUG));
+
+	return type - EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE;
+}
+
+/*
+ * Data for generic el packet class PAL.
+ */
+struct ev7_pal_subpacket {
+	union {
+		struct ev7_pal_logout_subpacket logout;	     /* Type     1 */
+		struct ev7_pal_processor_subpacket ev7;	     /* Type     4 */
+		struct ev7_pal_zbox_subpacket zbox;	     /* Type     5 */
+		struct ev7_pal_rbox_subpacket rbox;	     /* Type     6 */
+		struct ev7_pal_io_subpacket io;		     /* Type     7 */
+		struct ev7_pal_environmental_subpacket env;  /* Type 10-16 */
+		u64 as_quad[1];				     /* Raw u64    */
+	} by_type;
+};
+
+/*
+ * Struct to contain collected logout from subpackets.
+ */
+struct ev7_lf_subpackets {
+	struct ev7_pal_logout_subpacket *logout;		/* Type  1 */
+	struct ev7_pal_processor_subpacket *ev7;		/* Type  4 */
+	struct ev7_pal_zbox_subpacket *zbox;			/* Type  5 */
+	struct ev7_pal_rbox_subpacket *rbox;			/* Type  6 */
+	struct ev7_pal_io_subpacket *io;			/* Type  7 */
+	struct ev7_pal_environmental_subpacket *env[7];	     /* Type 10-16 */
+
+	unsigned int io_pid;
+};
+
+#endif /* __ALPHA_ERR_EV7_H */
+
+
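
ev7_lf_env_index() collapses the seven environmental types onto indices 0..6
of the env[] array above. A sketch of filing a parsed subpacket, where "lf"
is a hypothetical collection in progress:

	static void file_env(struct ev7_lf_subpackets *lf, int type,
			     struct ev7_pal_environmental_subpacket *env)
	{
		lf->env[ev7_lf_env_index(type)] = env;
	}
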
diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h
new file mode 100644
index 0000000..c85ab6b
--- /dev/null
+++ b/include/asm-alpha/errno.h
@@ -0,0 +1,119 @@
+#ifndef _ALPHA_ERRNO_H
+#define _ALPHA_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef	EAGAIN			/* 11 in errno-base.h */
+
+#define	EDEADLK		11	/* Resource deadlock would occur */
+
+#define	EAGAIN		35	/* Try again */
+#define	EWOULDBLOCK	EAGAIN	/* Operation would block */
+#define	EINPROGRESS	36	/* Operation now in progress */
+#define	EALREADY	37	/* Operation already in progress */
+#define	ENOTSOCK	38	/* Socket operation on non-socket */
+#define	EDESTADDRREQ	39	/* Destination address required */
+#define	EMSGSIZE	40	/* Message too long */
+#define	EPROTOTYPE	41	/* Protocol wrong type for socket */
+#define	ENOPROTOOPT	42	/* Protocol not available */
+#define	EPROTONOSUPPORT	43	/* Protocol not supported */
+#define	ESOCKTNOSUPPORT	44	/* Socket type not supported */
+#define	EOPNOTSUPP	45	/* Operation not supported on transport endpoint */
+#define	EPFNOSUPPORT	46	/* Protocol family not supported */
+#define	EAFNOSUPPORT	47	/* Address family not supported by protocol */
+#define	EADDRINUSE	48	/* Address already in use */
+#define	EADDRNOTAVAIL	49	/* Cannot assign requested address */
+#define	ENETDOWN	50	/* Network is down */
+#define	ENETUNREACH	51	/* Network is unreachable */
+#define	ENETRESET	52	/* Network dropped connection because of reset */
+#define	ECONNABORTED	53	/* Software caused connection abort */
+#define	ECONNRESET	54	/* Connection reset by peer */
+#define	ENOBUFS		55	/* No buffer space available */
+#define	EISCONN		56	/* Transport endpoint is already connected */
+#define	ENOTCONN	57	/* Transport endpoint is not connected */
+#define	ESHUTDOWN	58	/* Cannot send after transport endpoint shutdown */
+#define	ETOOMANYREFS	59	/* Too many references: cannot splice */
+#define	ETIMEDOUT	60	/* Connection timed out */
+#define	ECONNREFUSED	61	/* Connection refused */
+#define	ELOOP		62	/* Too many symbolic links encountered */
+#define	ENAMETOOLONG	63	/* File name too long */
+#define	EHOSTDOWN	64	/* Host is down */
+#define	EHOSTUNREACH	65	/* No route to host */
+#define	ENOTEMPTY	66	/* Directory not empty */
+
+#define	EUSERS		68	/* Too many users */
+#define	EDQUOT		69	/* Quota exceeded */
+#define	ESTALE		70	/* Stale NFS file handle */
+#define	EREMOTE		71	/* Object is remote */
+
+#define	ENOLCK		77	/* No record locks available */
+#define	ENOSYS		78	/* Function not implemented */
+
+#define	ENOMSG		80	/* No message of desired type */
+#define	EIDRM		81	/* Identifier removed */
+#define	ENOSR		82	/* Out of streams resources */
+#define	ETIME		83	/* Timer expired */
+#define	EBADMSG		84	/* Not a data message */
+#define	EPROTO		85	/* Protocol error */
+#define	ENODATA		86	/* No data available */
+#define	ENOSTR		87	/* Device not a stream */
+
+#define	ENOPKG		92	/* Package not installed */
+
+#define	EILSEQ		116	/* Illegal byte sequence */
+
+/* The following are just random noise.. */
+#define	ECHRNG		88	/* Channel number out of range */
+#define	EL2NSYNC	89	/* Level 2 not synchronized */
+#define	EL3HLT		90	/* Level 3 halted */
+#define	EL3RST		91	/* Level 3 reset */
+
+#define	ELNRNG		93	/* Link number out of range */
+#define	EUNATCH		94	/* Protocol driver not attached */
+#define	ENOCSI		95	/* No CSI structure available */
+#define	EL2HLT		96	/* Level 2 halted */
+#define	EBADE		97	/* Invalid exchange */
+#define	EBADR		98	/* Invalid request descriptor */
+#define	EXFULL		99	/* Exchange full */
+#define	ENOANO		100	/* No anode */
+#define	EBADRQC		101	/* Invalid request code */
+#define	EBADSLT		102	/* Invalid slot */
+
+#define	EDEADLOCK	EDEADLK
+
+#define	EBFONT		104	/* Bad font file format */
+#define	ENONET		105	/* Machine is not on the network */
+#define	ENOLINK		106	/* Link has been severed */
+#define	EADV		107	/* Advertise error */
+#define	ESRMNT		108	/* Srmount error */
+#define	ECOMM		109	/* Communication error on send */
+#define	EMULTIHOP	110	/* Multihop attempted */
+#define	EDOTDOT		111	/* RFS specific error */
+#define	EOVERFLOW	112	/* Value too large for defined data type */
+#define	ENOTUNIQ	113	/* Name not unique on network */
+#define	EBADFD		114	/* File descriptor in bad state */
+#define	EREMCHG		115	/* Remote address changed */
+
+#define	EUCLEAN		117	/* Structure needs cleaning */
+#define	ENOTNAM		118	/* Not a XENIX named type file */
+#define	ENAVAIL		119	/* No XENIX semaphores available */
+#define	EISNAM		120	/* Is a named type file */
+#define	EREMOTEIO	121	/* Remote I/O error */
+
+#define	ELIBACC		122	/* Can not access a needed shared library */
+#define	ELIBBAD		123	/* Accessing a corrupted shared library */
+#define	ELIBSCN		124	/* .lib section in a.out corrupted */
+#define	ELIBMAX		125	/* Attempting to link in too many shared libraries */
+#define	ELIBEXEC	126	/* Cannot exec a shared library directly */
+#define	ERESTART	127	/* Interrupted system call should be restarted */
+#define	ESTRPIPE	128	/* Streams pipe error */
+
+#define ENOMEDIUM	129	/* No medium found */
+#define EMEDIUMTYPE	130	/* Wrong medium type */
+#define	ECANCELED	131	/* Operation Cancelled */
+#define	ENOKEY		132	/* Required key not available */
+#define	EKEYEXPIRED	133	/* Key has expired */
+#define	EKEYREVOKED	134	/* Key has been revoked */
+#define	EKEYREJECTED	135	/* Key was rejected by service */
+
+#endif
diff --git a/include/asm-alpha/fcntl.h b/include/asm-alpha/fcntl.h
new file mode 100644
index 0000000..6b7d6c1
--- /dev/null
+++ b/include/asm-alpha/fcntl.h
@@ -0,0 +1,75 @@
+#ifndef _ALPHA_FCNTL_H
+#define _ALPHA_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+   located on an ext2 file system */
+#define O_ACCMODE	  0003
+#define O_RDONLY	    00
+#define O_WRONLY	    01
+#define O_RDWR		    02
+#define O_CREAT		 01000	/* not fcntl */
+#define O_TRUNC		 02000	/* not fcntl */
+#define O_EXCL		 04000	/* not fcntl */
+#define O_NOCTTY	010000	/* not fcntl */
+
+#define O_NONBLOCK	 00004
+#define O_APPEND	 00010
+#define O_NDELAY	O_NONBLOCK
+#define O_SYNC		040000
+#define FASYNC		020000	/* fcntl, for BSD compatibility */
+#define O_DIRECTORY	0100000	/* must be a directory */
+#define O_NOFOLLOW	0200000 /* don't follow links */
+#define O_LARGEFILE	0400000 /* will be set by the kernel on every open */
+#define O_DIRECT	02000000 /* direct disk access - should check with OSF/1 */
+#define O_NOATIME	04000000
+
+#define F_DUPFD		0	/* dup */
+#define F_GETFD		1	/* get close_on_exec */
+#define F_SETFD		2	/* set/clear close_on_exec */
+#define F_GETFL		3	/* get file->f_flags */
+#define F_SETFL		4       /* set file->f_flags */
+#define F_GETLK		7
+#define F_SETLK		8
+#define F_SETLKW	9
+
+#define F_SETOWN	5	/*  for sockets. */
+#define F_GETOWN	6	/*  for sockets. */
+#define F_SETSIG	10	/*  for sockets. */
+#define F_GETSIG	11	/*  for sockets. */
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC	1	/* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK		1
+#define F_WRLCK		2
+#define F_UNLCK		8
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK		16	/* or 3 */
+#define F_SHLCK		32	/* or 4 */
+
+#define F_INPROGRESS	64
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH		1	/* shared lock */
+#define LOCK_EX		2	/* exclusive lock */
+#define LOCK_NB		4	/* or'd with one of the above to prevent
+				   blocking */
+#define LOCK_UN		8	/* remove lock */
+#define LOCK_MAND      32      /* This is a mandatory flock */
+#define LOCK_READ      64      /* ... Which allows concurrent read operations */
+#define LOCK_WRITE     128     /* ... Which allows concurrent write operations */
+#define LOCK_RW        192     /* ... Which allows concurrent read & write ops */
+ 
+struct flock {
+	short l_type;
+	short l_whence;
+	__kernel_off_t l_start;
+	__kernel_off_t l_len;
+	__kernel_pid_t l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE  1024
+
+#endif
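
The flock layout and F_SETLK/F_SETLKW values follow the usual POSIX
record-locking pattern. A user-space sketch, with "fd" an already-open
descriptor and the usual <fcntl.h>/<unistd.h>/<stdio.h> includes:

	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means to end of file */
	};

	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* block until granted */
		perror("fcntl");
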
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h
new file mode 100644
index 0000000..289a00d
--- /dev/null
+++ b/include/asm-alpha/floppy.h
@@ -0,0 +1,119 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_ALPHA_FLOPPY_H
+#define __ASM_ALPHA_FLOPPY_H
+
+#include <linux/config.h>
+
+#define fd_inb(port)			inb_p(port)
+#define fd_outb(value,port)		outb_p(value,port)
+
+#define fd_enable_dma()         enable_dma(FLOPPY_DMA)
+#define fd_disable_dma()        disable_dma(FLOPPY_DMA)
+#define fd_request_dma()        request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma()           free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff()       clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode)   set_dma_mode(FLOPPY_DMA,mode)
+#define fd_set_dma_addr(addr)   set_dma_addr(FLOPPY_DMA,virt_to_bus(addr))
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt, \
+					    SA_INTERRUPT|SA_SAMPLE_RANDOM, \
+				            "floppy", NULL)
+#define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
+
+#ifdef CONFIG_PCI
+
+#include <linux/pci.h>
+
+#define fd_dma_setup(addr,size,mode,io) alpha_fd_dma_setup(addr,size,mode,io)
+
+static __inline__ int 
+alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+	static unsigned long prev_size;
+	static dma_addr_t bus_addr = 0;
+	static char *prev_addr;
+	static int prev_dir;
+	int dir;
+
+	dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+
+	if (bus_addr 
+	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
+		/* different from last time -- unmap prev */
+		pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
+		bus_addr = 0;
+	}
+
+	if (!bus_addr)	/* need to map it */
+		bus_addr = pci_map_single(isa_bridge, addr, size, dir);
+
+	/* remember this one as prev */
+	prev_addr = addr;
+	prev_size = size;
+	prev_dir = dir;
+
+	fd_clear_dma_ff();
+	fd_cacheflush(addr, size);
+	fd_set_dma_mode(mode);
+	set_dma_addr(FLOPPY_DMA, bus_addr);
+	fd_set_dma_count(size);
+	virtual_dma_port = io;
+	fd_enable_dma();
+
+	return 0;
+}
+
+#endif /* CONFIG_PCI */
+
+__inline__ void virtual_dma_init(void)
+{
+	/* Nothing to do on an Alpha */
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Again, the CMOS information doesn't work on the alpha..
+ */
+#define FLOPPY0_TYPE 6
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+/*
+ * Most Alphas have no problems with floppy DMA crossing 64k borders,
+ * except for certain ones, like XL and RUFFIAN.
+ *
+ * However, the test is simple and fast, and this *is* floppy, after all,
+ * so we do it for all platforms, just to make sure.
+ *
+ * This is advantageous in other circumstances as well, as in moving
+ * about the PCI DMA windows and forcing the floppy to start doing
+ * scatter-gather when it never had before, and there *is* a problem
+ * on that platform... ;-}
+ */
+
+static inline unsigned long CROSS_64KB(void *a, unsigned long s)
+{
+	unsigned long p = (unsigned long)a;
+	return ((p + s - 1) ^ p) & ~0xffffUL;
+}
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_ALPHA_FLOPPY_H */
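
CROSS_64KB() is nonzero exactly when the first and last byte of the buffer
differ in bits above 15; e.g. a 4-byte transfer at 0xfffe gives
(0x10001 ^ 0xfffe) & ~0xffff = 0x10000, so it crosses. A sketch of the check
a caller performs:

	if (CROSS_64KB(buf, len)) {
		/* split the transfer or bounce it through a safe buffer */
	}
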
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
new file mode 100644
index 0000000..c203fc2
--- /dev/null
+++ b/include/asm-alpha/fpu.h
@@ -0,0 +1,193 @@
+#ifndef __ASM_ALPHA_FPU_H
+#define __ASM_ALPHA_FPU_H
+
+/*
+ * Alpha floating-point control register defines:
+ */
+#define FPCR_DNOD	(1UL<<47)	/* denorm INV trap disable */
+#define FPCR_DNZ	(1UL<<48)	/* denorms to zero */
+#define FPCR_INVD	(1UL<<49)	/* invalid op disable (opt.) */
+#define FPCR_DZED	(1UL<<50)	/* division by zero disable (opt.) */
+#define FPCR_OVFD	(1UL<<51)	/* overflow disable (optional) */
+#define FPCR_INV	(1UL<<52)	/* invalid operation */
+#define FPCR_DZE	(1UL<<53)	/* division by zero */
+#define FPCR_OVF	(1UL<<54)	/* overflow */
+#define FPCR_UNF	(1UL<<55)	/* underflow */
+#define FPCR_INE	(1UL<<56)	/* inexact */
+#define FPCR_IOV	(1UL<<57)	/* integer overflow */
+#define FPCR_UNDZ	(1UL<<60)	/* underflow to zero (opt.) */
+#define FPCR_UNFD	(1UL<<61)	/* underflow disable (opt.) */
+#define FPCR_INED	(1UL<<62)	/* inexact disable (opt.) */
+#define FPCR_SUM	(1UL<<63)	/* summary bit */
+
+#define FPCR_DYN_SHIFT	58		/* first dynamic rounding mode bit */
+#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT)	/* towards 0 */
+#define FPCR_DYN_MINUS	 (0x1UL << FPCR_DYN_SHIFT)	/* towards -INF */
+#define FPCR_DYN_NORMAL	 (0x2UL << FPCR_DYN_SHIFT)	/* towards nearest */
+#define FPCR_DYN_PLUS	 (0x3UL << FPCR_DYN_SHIFT)	/* towards +INF */
+#define FPCR_DYN_MASK	 (0x3UL << FPCR_DYN_SHIFT)
+
+#define FPCR_MASK	0xffff800000000000L
+
+/*
+ * IEEE trap enables are implemented in software.  These per-thread
+ * bits are stored in the "ieee_state" field of "struct thread_info".
+ * Thus, the bits are defined so as not to conflict with the
+ * floating-point enable bit (which is architected).  On top of that,
+ * we want to make these bits compatible with OSF/1 so
+ * ieee_set_fp_control() etc. can be implemented easily and
+ * compatibly.  The corresponding definitions are in
+ * /usr/include/machine/fpu.h under OSF/1.
+ */
+#define IEEE_TRAP_ENABLE_INV	(1UL<<1)	/* invalid op */
+#define IEEE_TRAP_ENABLE_DZE	(1UL<<2)	/* division by zero */
+#define IEEE_TRAP_ENABLE_OVF	(1UL<<3)	/* overflow */
+#define IEEE_TRAP_ENABLE_UNF	(1UL<<4)	/* underflow */
+#define IEEE_TRAP_ENABLE_INE	(1UL<<5)	/* inexact */
+#define IEEE_TRAP_ENABLE_DNO	(1UL<<6)	/* denorm */
+#define IEEE_TRAP_ENABLE_MASK	(IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
+				 IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
+				 IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ		(1UL<<12)	/* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ		(1UL<<13)	/* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK		(IEEE_MAP_DMZ | IEEE_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define IEEE_STATUS_INV		(1UL<<17)
+#define IEEE_STATUS_DZE		(1UL<<18)
+#define IEEE_STATUS_OVF		(1UL<<19)
+#define IEEE_STATUS_UNF		(1UL<<20)
+#define IEEE_STATUS_INE		(1UL<<21)
+#define IEEE_STATUS_DNO		(1UL<<22)
+
+#define IEEE_STATUS_MASK	(IEEE_STATUS_INV | IEEE_STATUS_DZE |	\
+				 IEEE_STATUS_OVF | IEEE_STATUS_UNF |	\
+				 IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK		(IEEE_TRAP_ENABLE_MASK |		\
+				 IEEE_STATUS_MASK | IEEE_MAP_MASK)
+
+#define IEEE_CURRENT_RM_SHIFT	32
+#define IEEE_CURRENT_RM_MASK	(3UL<<IEEE_CURRENT_RM_SHIFT)
+
+#define IEEE_STATUS_TO_EXCSUM_SHIFT	16
+
+#define IEEE_INHERIT    (1UL<<63)	/* inherit on thread create? */
+
+/*
+ * Convert the software IEEE trap enable and status bits into the
+ * hardware fpcr format. 
+ *
+ * Digital Unix engineers receive my thanks for not defining the
+ * software bits identical to the hardware bits.  The chip designers
+ * receive my thanks for making all the not-implemented fpcr bits
+ * RAZ, forcing us to use system calls to read/write this value.
+ */
+
+static inline unsigned long
+ieee_swcr_to_fpcr(unsigned long sw)
+{
+	unsigned long fp;
+	fp = (sw & IEEE_STATUS_MASK) << 35;
+	fp |= (sw & IEEE_MAP_DMZ) << 36;
+	fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
+	fp |= (~sw & (IEEE_TRAP_ENABLE_INV
+		      | IEEE_TRAP_ENABLE_DZE
+		      | IEEE_TRAP_ENABLE_OVF)) << 48;
+	fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+	fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
+	fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
+	return fp;
+}
+
+static inline unsigned long
+ieee_fpcr_to_swcr(unsigned long fp)
+{
+	unsigned long sw;
+	sw = (fp >> 35) & IEEE_STATUS_MASK;
+	sw |= (fp >> 36) & IEEE_MAP_DMZ;
+	sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
+			     | IEEE_TRAP_ENABLE_DZE
+			     | IEEE_TRAP_ENABLE_OVF);
+	sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
+	sw |= (fp >> 47) & IEEE_MAP_UMZ;
+	sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
+	return sw;
+}
+
+#ifdef __KERNEL__
+
+/* The following two functions don't need trapb/excb instructions
+   around the mf_fpcr/mt_fpcr instructions because (a) the kernel
+   never generates arithmetic faults and (b) call_pal instructions
+   are implied trap barriers.  */
+
+static inline unsigned long
+rdfpcr(void)
+{
+	unsigned long tmp, ret;
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+	__asm__ __volatile__ (
+		"ftoit $f0,%0\n\t"
+		"mf_fpcr $f0\n\t"
+		"ftoit $f0,%1\n\t"
+		"itoft %0,$f0"
+		: "=r"(tmp), "=r"(ret));
+#else
+	__asm__ __volatile__ (
+		"stt $f0,%0\n\t"
+		"mf_fpcr $f0\n\t"
+		"stt $f0,%1\n\t"
+		"ldt $f0,%0"
+		: "=m"(tmp), "=m"(ret));
+#endif
+
+	return ret;
+}
+
+static inline void
+wrfpcr(unsigned long val)
+{
+	unsigned long tmp;
+
+#if defined(__alpha_cix__) || defined(__alpha_fix__)
+	__asm__ __volatile__ (
+		"ftoit $f0,%0\n\t"
+		"itoft %1,$f0\n\t"
+		"mt_fpcr $f0\n\t"
+		"itoft %0,$f0"
+		: "=&r"(tmp) : "r"(val));
+#else
+	__asm__ __volatile__ (
+		"stt $f0,%0\n\t"
+		"ldt $f0,%1\n\t"
+		"mt_fpcr $f0\n\t"
+		"ldt $f0,%0"
+		: "=m"(tmp) : "m"(val));
+#endif
+}
+
+static inline unsigned long
+swcr_update_status(unsigned long swcr, unsigned long fpcr)
+{
+	/* EV6 implements most of the bits in hardware.  Collect
+	   the accrued exception bits from the real fpcr.  */
+	if (implver() == IMPLVER_EV6) {
+		swcr &= ~IEEE_STATUS_MASK;
+		swcr |= (fpcr >> 35) & IEEE_STATUS_MASK;
+	}
+	return swcr;
+}
+
+extern unsigned long alpha_read_fp_reg (unsigned long reg);
+extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
+extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ALPHA_FPU_H */
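
The swcr/fpcr converters compose with rdfpcr()/wrfpcr(). A kernel-context
sketch of enabling traps, where "swcr" stands in for hypothetical per-thread
state:

	unsigned long swcr = IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE;

	wrfpcr(ieee_swcr_to_fpcr(swcr));
	/* later, fold hardware-accrued status bits back in (EV6 case): */
	swcr = swcr_update_status(swcr, rdfpcr());
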
diff --git a/include/asm-alpha/gct.h b/include/asm-alpha/gct.h
new file mode 100644
index 0000000..3504c70
--- /dev/null
+++ b/include/asm-alpha/gct.h
@@ -0,0 +1,58 @@
+#ifndef __ALPHA_GCT_H
+#define __ALPHA_GCT_H
+
+typedef u64 gct_id;
+typedef u64 gct6_handle;
+
+typedef struct __gct6_node {
+	u8 type;	
+	u8 subtype;
+	u16 size;
+	u32 hd_extension;
+	gct6_handle owner;
+	gct6_handle active_user;
+	gct_id id;
+	u64 flags;
+	u16 rev;
+	u16 change_counter;
+	u16 max_child;
+	u16 reserved1;
+	gct6_handle saved_owner;
+	gct6_handle affinity;
+	gct6_handle parent;
+	gct6_handle next;
+	gct6_handle prev;
+	gct6_handle child;
+	u64 fw_flags;
+	u64 os_usage;
+	u64 fru_id;
+	u32 checksum;
+	u32 magic;	/* 'GLXY' */
+} gct6_node;
+
+typedef struct {
+	u8 type;	
+	u8 subtype;
+	void (*callout)(gct6_node *);
+} gct6_search_struct;
+
+#define GCT_NODE_MAGIC	  0x59584c47	/* 'GLXY' */
+
+/* 
+ * node types 
+ */
+#define GCT_TYPE_HOSE			0x0E
+
+/*
+ * node subtypes
+ */
+#define GCT_SUBTYPE_IO_PORT_MODULE	0x2C
+
+#define GCT_NODE_PTR(off) ((gct6_node *)((char *)hwrpb + 		\
+					 hwrpb->frut_offset + 		\
+					 (gct6_handle)(off)))
+
+int gct6_find_nodes(gct6_node *, gct6_search_struct *);
+
+#endif /* __ALPHA_GCT_H */
+
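
gct6_find_nodes() walks the tree from a root node and runs the callout for
each type/subtype match in the search table. A sketch, assuming the table is
terminated by a zeroed entry and that GCT_NODE_PTR(0) addresses the root;
"found_hose" is hypothetical:

	static void found_hose(gct6_node *node)
	{
		/* record the hose for later probing */
	}

	static gct6_search_struct wanted[] = {
		{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, found_hose },
		{ 0, 0, NULL },
	};

	gct6_find_nodes(GCT_NODE_PTR(0), wanted);
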
diff --git a/include/asm-alpha/gentrap.h b/include/asm-alpha/gentrap.h
new file mode 100644
index 0000000..ae50cc3
--- /dev/null
+++ b/include/asm-alpha/gentrap.h
@@ -0,0 +1,37 @@
+#ifndef _ASMAXP_GENTRAP_H
+#define _ASMAXP_GENTRAP_H
+
+/*
+ * Definitions for gentrap causes.  They are generated by user-level
+ * programs and therefore should be compatible with the corresponding
+ * OSF/1 definitions.
+ */
+#define GEN_INTOVF	-1	/* integer overflow */
+#define GEN_INTDIV	-2	/* integer division by zero */
+#define GEN_FLTOVF	-3	/* fp overflow */
+#define GEN_FLTDIV	-4	/* fp division by zero */
+#define GEN_FLTUND	-5	/* fp underflow */
+#define GEN_FLTINV	-6	/* invalid fp operand */
+#define GEN_FLTINE	-7	/* inexact fp operand */
+#define GEN_DECOVF	-8	/* decimal overflow (for COBOL??) */
+#define GEN_DECDIV	-9	/* decimal division by zero */
+#define GEN_DECINV	-10	/* invalid decimal operand */
+#define GEN_ROPRAND	-11	/* reserved operand */
+#define GEN_ASSERTERR	-12	/* assertion error */
+#define GEN_NULPTRERR	-13	/* null pointer error */
+#define GEN_STKOVF	-14	/* stack overflow */
+#define GEN_STRLENERR	-15	/* string length error */
+#define GEN_SUBSTRERR	-16	/* substring error */
+#define GEN_RANGERR	-17	/* range error */
+#define GEN_SUBRNG	-18
+#define GEN_SUBRNG1	-19	 
+#define GEN_SUBRNG2	-20
+#define GEN_SUBRNG3	-21	/* these report range errors for */
+#define GEN_SUBRNG4	-22	/* subscripting (indexing) at levels 0..7 */
+#define GEN_SUBRNG5	-23
+#define GEN_SUBRNG6	-24
+#define GEN_SUBRNG7	-25
+
+/* the remaining codes (-26..-1023) are reserved. */
+
+#endif /* _ASMAXP_GENTRAP_H */
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
new file mode 100644
index 0000000..c0593f9
--- /dev/null
+++ b/include/asm-alpha/hardirq.h
@@ -0,0 +1,29 @@
+#ifndef _ALPHA_HARDIRQ_H
+#define _ALPHA_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cache.h>
+
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+	unsigned long __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+#define HARDIRQ_BITS	12
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially nestable IRQ sources in the system
+ * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
+ * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
+ * so we really only have 8 nestable IRQs, but allow some overhead.
+ */
+#if (1 << HARDIRQ_BITS) < 16
+#error HARDIRQ_BITS is too low!
+#endif
+
+#endif /* _ALPHA_HARDIRQ_H */
diff --git a/include/asm-alpha/hdreg.h b/include/asm-alpha/hdreg.h
new file mode 100644
index 0000000..7f7fd1a
--- /dev/null
+++ b/include/asm-alpha/hdreg.h
@@ -0,0 +1 @@
+#include <asm-generic/hdreg.h>
diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h
new file mode 100644
index 0000000..a310b9e
--- /dev/null
+++ b/include/asm-alpha/hw_irq.h
@@ -0,0 +1,16 @@
+#ifndef _ALPHA_HW_IRQ_H
+#define _ALPHA_HW_IRQ_H
+
+#include <linux/config.h>
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+
+extern volatile unsigned long irq_err_count;
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define ACTUAL_NR_IRQS	alpha_mv.nr_irqs
+#else
+#define ACTUAL_NR_IRQS	NR_IRQS
+#endif
+
+#endif
diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h
new file mode 100644
index 0000000..8e8f871a
--- /dev/null
+++ b/include/asm-alpha/hwrpb.h
@@ -0,0 +1,220 @@
+#ifndef __ALPHA_HWRPB_H
+#define __ALPHA_HWRPB_H
+
+#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000)
+
+/*
+ * DEC processor types for Alpha systems.  Found in HWRPB.
+ * These values are architected.
+ */
+
+#define EV3_CPU                 1       /* EV3                  */
+#define EV4_CPU                 2       /* EV4 (21064)          */
+#define LCA4_CPU                4       /* LCA4 (21066/21068)   */
+#define EV5_CPU                 5       /* EV5 (21164)          */
+#define EV45_CPU                6       /* EV4.5 (21064/xxx)    */
+#define EV56_CPU		7	/* EV5.6 (21164)	*/
+#define EV6_CPU			8	/* EV6 (21264)		*/
+#define PCA56_CPU		9	/* PCA56 (21164PC)	*/
+#define PCA57_CPU		10	/* PCA57 (notyet)	*/
+#define EV67_CPU		11	/* EV67 (21264A)	*/
+#define EV68CB_CPU		12	/* EV68CB (21264C)	*/
+#define EV68AL_CPU		13	/* EV68AL (21264B)	*/
+#define EV68CX_CPU		14	/* EV68CX (21264D)	*/
+#define EV7_CPU			15	/* EV7 (21364)		*/
+#define EV79_CPU		16	/* EV79 (21364??)	*/
+#define EV69_CPU		17	/* EV69 (21264/EV69A)	*/
+
+/*
+ * DEC system types for Alpha systems.  Found in HWRPB.
+ * These values are architected.
+ */
+
+#define ST_ADU			  1	/* Alpha ADU systype	*/
+#define ST_DEC_4000		  2	/* Cobra systype	*/
+#define ST_DEC_7000		  3	/* Ruby systype		*/
+#define ST_DEC_3000_500		  4	/* Flamingo systype	*/
+#define ST_DEC_2000_300		  6	/* Jensen systype	*/
+#define ST_DEC_3000_300		  7	/* Pelican systype	*/
+#define ST_DEC_2100_A500	  9	/* Sable systype	*/
+#define ST_DEC_AXPVME_64	 10	/* AXPvme system type	*/
+#define ST_DEC_AXPPCI_33	 11	/* NoName system type	*/
+#define ST_DEC_TLASER		 12	/* Turbolaser systype	*/
+#define ST_DEC_2100_A50		 13	/* Avanti systype	*/
+#define ST_DEC_MUSTANG		 14	/* Mustang systype	*/
+#define ST_DEC_ALCOR		 15	/* Alcor (EV5) systype	*/
+#define ST_DEC_1000		 17	/* Mikasa systype	*/
+#define ST_DEC_EB64		 18	/* EB64 systype		*/
+#define ST_DEC_EB66		 19	/* EB66 systype		*/
+#define ST_DEC_EB64P		 20	/* EB64+ systype	*/
+#define ST_DEC_BURNS		 21	/* laptop systype	*/
+#define ST_DEC_RAWHIDE		 22	/* Rawhide systype	*/
+#define ST_DEC_K2		 23	/* K2 systype		*/
+#define ST_DEC_LYNX		 24	/* Lynx systype		*/
+#define ST_DEC_XL		 25	/* Alpha XL systype	*/
+#define ST_DEC_EB164		 26	/* EB164 systype	*/
+#define ST_DEC_NORITAKE		 27	/* Noritake systype	*/
+#define ST_DEC_CORTEX		 28	/* Cortex systype	*/
+#define ST_DEC_MIATA		 30	/* Miata systype        */
+#define ST_DEC_XXM		 31	/* XXM systype		*/
+#define ST_DEC_TAKARA		 32	/* Takara systype	*/
+#define ST_DEC_YUKON		 33	/* Yukon systype	*/
+#define ST_DEC_TSUNAMI		 34	/* Tsunami systype	*/
+#define ST_DEC_WILDFIRE		 35	/* Wildfire systype	*/
+#define ST_DEC_CUSCO		 36	/* CUSCO systype	*/
+#define ST_DEC_EIGER		 37	/* Eiger systype	*/
+#define ST_DEC_TITAN		 38	/* Titan systype	*/
+#define ST_DEC_MARVEL		 39	/* Marvel systype	*/
+
+/* UNOFFICIAL!!! */
+#define ST_UNOFFICIAL_BIAS	100
+#define ST_DTI_RUFFIAN		101	/* RUFFIAN systype	*/
+
+/* Alpha Processor, Inc. systems */
+#define ST_API_BIAS		200
+#define ST_API_NAUTILUS		201	/* UP1000 systype	*/
+
+struct pcb_struct {
+	unsigned long ksp;
+	unsigned long usp;
+	unsigned long ptbr;
+	unsigned int pcc;
+	unsigned int asn;
+	unsigned long unique;
+	unsigned long flags;
+	unsigned long res1, res2;
+};
+
+struct percpu_struct {
+	unsigned long hwpcb[16];
+	unsigned long flags;
+	unsigned long pal_mem_size;
+	unsigned long pal_scratch_size;
+	unsigned long pal_mem_pa;
+	unsigned long pal_scratch_pa;
+	unsigned long pal_revision;
+	unsigned long type;
+	unsigned long variation;
+	unsigned long revision;
+	unsigned long serial_no[2];
+	unsigned long logout_area_pa;
+	unsigned long logout_area_len;
+	unsigned long halt_PCBB;
+	unsigned long halt_PC;
+	unsigned long halt_PS;
+	unsigned long halt_arg;
+	unsigned long halt_ra;
+	unsigned long halt_pv;
+	unsigned long halt_reason;
+	unsigned long res;
+	unsigned long ipc_buffer[21];
+	unsigned long palcode_avail[16];
+	unsigned long compatibility;
+	unsigned long console_data_log_pa;
+	unsigned long console_data_log_length;
+	unsigned long bcache_info;
+};
+
+struct procdesc_struct {
+	unsigned long weird_vms_stuff;
+	unsigned long address;
+};
+
+struct vf_map_struct {
+	unsigned long va;
+	unsigned long pa;
+	unsigned long count;
+};
+
+struct crb_struct {
+	struct procdesc_struct * dispatch_va;
+	struct procdesc_struct * dispatch_pa;
+	struct procdesc_struct * fixup_va;
+	struct procdesc_struct * fixup_pa;
+	/* virtual->physical map */
+	unsigned long map_entries;
+	unsigned long map_pages;
+	struct vf_map_struct map[1];
+};
+
+struct memclust_struct {
+	unsigned long start_pfn;
+	unsigned long numpages;
+	unsigned long numtested;
+	unsigned long bitmap_va;
+	unsigned long bitmap_pa;
+	unsigned long bitmap_chksum;
+	unsigned long usage;
+};
+
+struct memdesc_struct {
+	unsigned long chksum;
+	unsigned long optional_pa;
+	unsigned long numclusters;
+	struct memclust_struct cluster[0];
+};
+
+struct dsr_struct {
+	long smm;			/* SMM number used by LMF       */
+	unsigned long  lurt_off;	/* offset to LURT table         */
+	unsigned long  sysname_off;	/* offset to sysname char count */
+};
+
+struct hwrpb_struct {
+	unsigned long phys_addr;	/* check: physical address of the hwrpb */
+	unsigned long id;		/* check: "HWRPB\0\0\0" */
+	unsigned long revision;	
+	unsigned long size;		/* size of hwrpb */
+	unsigned long cpuid;
+	unsigned long pagesize;		/* 8192, I hope */
+	unsigned long pa_bits;		/* number of physical address bits */
+	unsigned long max_asn;
+	unsigned char ssn[16];		/* system serial number: big brother is watching */
+	unsigned long sys_type;
+	unsigned long sys_variation;
+	unsigned long sys_revision;
+	unsigned long intr_freq;	/* interval clock frequency * 4096 */
+	unsigned long cycle_freq;	/* cycle counter frequency */
+	unsigned long vptb;		/* Virtual Page Table Base address */
+	unsigned long res1;
+	unsigned long tbhb_offset;	/* Translation Buffer Hint Block */
+	unsigned long nr_processors;
+	unsigned long processor_size;
+	unsigned long processor_offset;
+	unsigned long ctb_nr;
+	unsigned long ctb_size;		/* console terminal block size */
+	unsigned long ctbt_offset;	/* console terminal block table offset */
+	unsigned long crb_offset;	/* console callback routine block */
+	unsigned long mddt_offset;	/* memory data descriptor table */
+	unsigned long cdb_offset;	/* configuration data block (or NULL) */
+	unsigned long frut_offset;	/* FRU table (or NULL) */
+	void (*save_terminal)(unsigned long);
+	unsigned long save_terminal_data;
+	void (*restore_terminal)(unsigned long);
+	unsigned long restore_terminal_data;
+	void (*CPU_restart)(unsigned long);
+	unsigned long CPU_restart_data;
+	unsigned long res2;
+	unsigned long res3;
+	unsigned long chksum;
+	unsigned long rxrdy;
+	unsigned long txrdy;
+	unsigned long dsr_offset;	/* "Dynamic System Recognition Data Block Table" */
+};
+
+#ifdef __KERNEL__
+
+extern struct hwrpb_struct *hwrpb;
+
+static inline void
+hwrpb_update_checksum(struct hwrpb_struct *h)
+{
+	unsigned long sum = 0, *l;
+	for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l)
+		sum += *l;
+	h->chksum = sum;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_HWRPB_H */
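A note on the checksum convention used by hwrpb_update_checksum() above: the
chksum field is simply the wrapping 64-bit sum of every quadword that precedes
it.  A minimal user-space sketch of the same computation, assuming a raw dump
of the HWRPB in a local buffer (the buffer contents are hypothetical, and the
quadword count of 36 assumes the layout above with 8-byte longs and pointers):

#include <stdio.h>

/* Wrapping sum of the quadwords preceding the chksum slot, mirroring
   hwrpb_update_checksum(). */
static unsigned long hwrpb_sum(const unsigned long *rpb, unsigned long nquads)
{
	unsigned long sum = 0, i;

	for (i = 0; i < nquads; i++)
		sum += rpb[i];		/* 64-bit wrap-around is intended */
	return sum;
}

int main(void)
{
	unsigned long rpb[64] = { 0x2000000UL };  /* hypothetical HWRPB image */

	/* With the struct layout above, 36 quadwords precede chksum. */
	printf("chksum = %#lx\n", hwrpb_sum(rpb, 36));
	return 0;
}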
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
new file mode 100644
index 0000000..68934a2
--- /dev/null
+++ b/include/asm-alpha/ide.h
@@ -0,0 +1,61 @@
+/*
+ *  linux/include/asm-alpha/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+/*
+ *  This file contains the alpha architecture specific IDE code.
+ */
+
+#ifndef __ASMalpha_IDE_H
+#define __ASMalpha_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS	CONFIG_IDE_MAX_HWIFS
+#endif
+
+#define IDE_ARCH_OBSOLETE_DEFAULTS
+
+static inline int ide_default_irq(unsigned long base)
+{
+	switch (base) {
+		case 0x1f0: return 14;
+		case 0x170: return 15;
+		case 0x1e8: return 11;
+		case 0x168: return 10;
+		default:
+			return 0;
+	}
+}
+
+static inline unsigned long ide_default_io_base(int index)
+{
+	switch (index) {
+		case 0:	return 0x1f0;
+		case 1:	return 0x170;
+		case 2: return 0x1e8;
+		case 3: return 0x168;
+		default:
+			return 0;
+	}
+}
+
+#define IDE_ARCH_OBSOLETE_INIT
+#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
+
+#ifdef CONFIG_PCI
+#define ide_init_default_irq(base)	(0)
+#else
+#define ide_init_default_irq(base)	ide_default_irq(base)
+#endif
+
+#include <asm-generic/ide_iops.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMalpha_IDE_H */
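The four port/IRQ pairs above are the classic legacy ISA IDE assignments, and
the control register always sits at base + 0x206.  A small illustrative
program tabulating them (values copied straight from the switches above):

#include <stdio.h>

/* Base/IRQ pairs from ide_default_io_base()/ide_default_irq(), plus the
   control port at base + 0x206. */
static const struct { unsigned long base; int irq; } legacy_ide[] = {
	{ 0x1f0, 14 }, { 0x170, 15 }, { 0x1e8, 11 }, { 0x168, 10 },
};

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("hwif%d: io %#lx, ctl %#lx, irq %d\n", i,
		       legacy_ide[i].base, legacy_ide[i].base + 0x206,
		       legacy_ide[i].irq);
	return 0;
}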
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
new file mode 100644
index 0000000..871dd7a
--- /dev/null
+++ b/include/asm-alpha/io.h
@@ -0,0 +1,682 @@
+#ifndef __ALPHA_IO_H
+#define __ALPHA_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/compiler.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/machvec.h>
+#include <asm/hwrpb.h>
+
+/* The generic header contains only prototypes.  Including it ensures that
+   the implementation we have here matches that interface.  */
+#include <asm-generic/iomap.h>
+
+/* We don't use IO slowdowns on the Alpha, but.. */
+#define __SLOW_DOWN_IO	do { } while (0)
+#define SLOW_DOWN_IO	do { } while (0)
+
+/*
+ * Virtual -> physical identity mapping starts at this offset
+ */
+#ifdef USE_48_BIT_KSEG
+#define IDENT_ADDR     0xffff800000000000UL
+#else
+#define IDENT_ADDR     0xfffffc0000000000UL
+#endif
+
+/*
+ * We try to avoid hae updates (thus the cache), but when we
+ * do need to update the hae, we need to do it atomically, so
+ * that any interrupts wouldn't get confused with the hae
+ * register not being up-to-date with respect to the hardware
+ * value.
+ */
+static inline void __set_hae(unsigned long new_hae)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+
+	alpha_mv.hae_cache = new_hae;
+	*alpha_mv.hae_register = new_hae;
+	mb();
+	/* Re-read to make sure it was written.  */
+	new_hae = *alpha_mv.hae_register;
+
+	local_irq_restore(flags);
+}
+
+static inline void set_hae(unsigned long new_hae)
+{
+	if (new_hae != alpha_mv.hae_cache)
+		__set_hae(new_hae);
+}
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ */
+#ifdef USE_48_BIT_KSEG
+static inline unsigned long virt_to_phys(void *address)
+{
+	return (unsigned long)address - IDENT_ADDR;
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+	return (void *) (address + IDENT_ADDR);
+}
+#else
+static inline unsigned long virt_to_phys(void *address)
+{
+	unsigned long phys = (unsigned long)address;
+
+	/* Sign-extend from bit 41.  */
+	phys <<= (64 - 41);
+	phys = (long)phys >> (64 - 41);
+
+	/* Crop to the physical address width of the processor.  */
+	phys &= (1ul << hwrpb->pa_bits) - 1;
+
+	return phys;
+}
+
+static inline void *phys_to_virt(unsigned long address)
+{
+	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
+}
+#endif
+
+#define page_to_phys(page)	page_to_pa(page)
+
+/* This depends on working iommu.  */
+#define BIO_VMERGE_BOUNDARY	(alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
+
+/* Maximum PIO space address supported?  */
+#define IO_SPACE_LIMIT 0xffff
+
+/*
+ * Change addresses as seen by the kernel (virtual) to addresses as
+ * seen by a device (bus), and vice versa.
+ *
+ * Note that this only works for a limited range of kernel addresses,
+ * and very well may not span all memory.  Consider this interface 
+ * deprecated in favour of the mapping functions in <asm/pci.h>.
+ */
+extern unsigned long __direct_map_base;
+extern unsigned long __direct_map_size;
+
+static inline unsigned long virt_to_bus(void *address)
+{
+	unsigned long phys = virt_to_phys(address);
+	unsigned long bus = phys + __direct_map_base;
+	return phys <= __direct_map_size ? bus : 0;
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+	void *virt;
+
+	/* This check is a sanity check but also ensures that bus address 0
+	   maps to virtual address 0 which is useful to detect null pointers
+	   (the NCR driver is much simpler if NULL pointers are preserved).  */
+	address -= __direct_map_base;
+	virt = phys_to_virt(address);
+	return (long)address <= 0 ? NULL : virt;
+}
+
+/*
+ * There are different chipsets to interface the Alpha CPUs to the world.
+ */
+
+#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
+#define _IO_CONCAT(a,b)	a ## _ ## b
+
+#ifdef CONFIG_ALPHA_GENERIC
+
+/* In a generic kernel, we always go through the machine vector.  */
+
+#define REMAP1(TYPE, NAME, QUAL)					\
+static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
+{									\
+	return alpha_mv.mv_##NAME(addr);				\
+}
+
+#define REMAP2(TYPE, NAME, QUAL)					\
+static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
+{									\
+	alpha_mv.mv_##NAME(b, addr);					\
+}
+
+REMAP1(unsigned int, ioread8, /**/)
+REMAP1(unsigned int, ioread16, /**/)
+REMAP1(unsigned int, ioread32, /**/)
+REMAP1(u8, readb, const volatile)
+REMAP1(u16, readw, const volatile)
+REMAP1(u32, readl, const volatile)
+REMAP1(u64, readq, const volatile)
+
+REMAP2(u8, iowrite8, /**/)
+REMAP2(u16, iowrite16, /**/)
+REMAP2(u32, iowrite32, /**/)
+REMAP2(u8, writeb, volatile)
+REMAP2(u16, writew, volatile)
+REMAP2(u32, writel, volatile)
+REMAP2(u64, writeq, volatile)
+
+#undef REMAP1
+#undef REMAP2
+
+static inline void __iomem *generic_ioportmap(unsigned long a)
+{
+	return alpha_mv.mv_ioportmap(a);
+}
+
+static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
+{
+	return alpha_mv.mv_ioremap(a, s);
+}
+
+static inline void generic_iounmap(volatile void __iomem *a)
+{
+	return alpha_mv.mv_iounmap(a);
+}
+
+static inline int generic_is_ioaddr(unsigned long a)
+{
+	return alpha_mv.mv_is_ioaddr(a);
+}
+
+static inline int generic_is_mmio(const volatile void __iomem *a)
+{
+	return alpha_mv.mv_is_mmio(a);
+}
+
+#define __IO_PREFIX		generic
+#define generic_trivial_rw_bw	0
+#define generic_trivial_rw_lq	0
+#define generic_trivial_io_bw	0
+#define generic_trivial_io_lq	0
+#define generic_trivial_iounmap	0
+
+#else
+
+#if defined(CONFIG_ALPHA_APECS)
+# include <asm/core_apecs.h>
+#elif defined(CONFIG_ALPHA_CIA)
+# include <asm/core_cia.h>
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# include <asm/core_irongate.h>
+#elif defined(CONFIG_ALPHA_JENSEN)
+# include <asm/jensen.h>
+#elif defined(CONFIG_ALPHA_LCA)
+# include <asm/core_lca.h>
+#elif defined(CONFIG_ALPHA_MARVEL)
+# include <asm/core_marvel.h>
+#elif defined(CONFIG_ALPHA_MCPCIA)
+# include <asm/core_mcpcia.h>
+#elif defined(CONFIG_ALPHA_POLARIS)
+# include <asm/core_polaris.h>
+#elif defined(CONFIG_ALPHA_T2)
+# include <asm/core_t2.h>
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# include <asm/core_tsunami.h>
+#elif defined(CONFIG_ALPHA_TITAN)
+# include <asm/core_titan.h>
+#elif defined(CONFIG_ALPHA_WILDFIRE)
+# include <asm/core_wildfire.h>
+#else
+#error "What system is this?"
+#endif
+
+#endif /* GENERIC */
+
+/*
+ * We always have external versions of these routines.
+ */
+extern u8		inb(unsigned long port);
+extern u16		inw(unsigned long port);
+extern u32		inl(unsigned long port);
+extern void		outb(u8 b, unsigned long port);
+extern void		outw(u16 b, unsigned long port);
+extern void		outl(u32 b, unsigned long port);
+
+extern u8		readb(const volatile void __iomem *addr);
+extern u16		readw(const volatile void __iomem *addr);
+extern u32		readl(const volatile void __iomem *addr);
+extern u64		readq(const volatile void __iomem *addr);
+extern void		writeb(u8 b, volatile void __iomem *addr);
+extern void		writew(u16 b, volatile void __iomem *addr);
+extern void		writel(u32 b, volatile void __iomem *addr);
+extern void		writeq(u64 b, volatile void __iomem *addr);
+
+extern u8		__raw_readb(const volatile void __iomem *addr);
+extern u16		__raw_readw(const volatile void __iomem *addr);
+extern u32		__raw_readl(const volatile void __iomem *addr);
+extern u64		__raw_readq(const volatile void __iomem *addr);
+extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
+extern void		__raw_writew(u16 b, volatile void __iomem *addr);
+extern void		__raw_writel(u32 b, volatile void __iomem *addr);
+extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
+
+/*
+ * Mapping from port numbers to __iomem space is pretty easy.
+ */
+
+/* These two have to be extern inline because of the extern prototype from
+   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
+   the same declaration.  */
+extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
+}
+
+extern inline void ioport_unmap(void __iomem *addr)
+{
+}
+
+static inline void __iomem *ioremap(unsigned long port, unsigned long size)
+{
+	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
+}
+
+static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
+				      unsigned long flags)
+{
+	return ioremap(port, size);
+}
+
+static inline void __iomem * ioremap_nocache(unsigned long offset,
+					     unsigned long size)
+{
+	return ioremap(offset, size);
+} 
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
+}
+
+static inline int __is_ioaddr(unsigned long addr)
+{
+	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
+}
+#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
+
+static inline int __is_mmio(const volatile void __iomem *addr)
+{
+	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
+}
+
+
+/*
+ * If the actual I/O bits are sufficiently trivial, then expand inline.
+ */
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+extern inline unsigned int ioread8(void __iomem *addr)
+{
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+	mb();
+	return ret;
+}
+
+extern inline unsigned int ioread16(void __iomem *addr)
+{
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+	mb();
+	return ret;
+}
+
+extern inline void iowrite8(u8 b, void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
+	mb();
+}
+
+extern inline void iowrite16(u16 b, void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
+	mb();
+}
+
+extern inline u8 inb(unsigned long port)
+{
+	return ioread8(ioport_map(port, 1));
+}
+
+extern inline u16 inw(unsigned long port)
+{
+	return ioread16(ioport_map(port, 2));
+}
+
+extern inline void outb(u8 b, unsigned long port)
+{
+	iowrite8(b, ioport_map(port, 1));
+}
+
+extern inline void outw(u16 b, unsigned long port)
+{
+	iowrite16(b, ioport_map(port, 2));
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+extern inline unsigned int ioread32(void __iomem *addr)
+{
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+	mb();
+	return ret;
+}
+
+extern inline void iowrite32(u32 b, void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
+	mb();
+}
+
+extern inline u32 inl(unsigned long port)
+{
+	return ioread32(ioport_map(port, 4));
+}
+
+extern inline void outl(u32 b, unsigned long port)
+{
+	iowrite32(b, ioport_map(port, 4));
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
+extern inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+	return IO_CONCAT(__IO_PREFIX,readb)(addr);
+}
+
+extern inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+	return IO_CONCAT(__IO_PREFIX,readw)(addr);
+}
+
+extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
+}
+
+extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
+}
+
+extern inline u8 readb(const volatile void __iomem *addr)
+{
+	u8 ret = __raw_readb(addr);
+	mb();
+	return ret;
+}
+
+extern inline u16 readw(const volatile void __iomem *addr)
+{
+	u16 ret = __raw_readw(addr);
+	mb();
+	return ret;
+}
+
+extern inline void writeb(u8 b, volatile void __iomem *addr)
+{
+	__raw_writeb(b, addr);
+	mb();
+}
+
+extern inline void writew(u16 b, volatile void __iomem *addr)
+{
+	__raw_writew(b, addr);
+	mb();
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
+extern inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+	return IO_CONCAT(__IO_PREFIX,readl)(addr);
+}
+
+extern inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+	return IO_CONCAT(__IO_PREFIX,readq)(addr);
+}
+
+extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
+}
+
+extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
+}
+
+extern inline u32 readl(const volatile void __iomem *addr)
+{
+	u32 ret = __raw_readl(addr);
+	mb();
+	return ret;
+}
+
+extern inline u64 readq(const volatile void __iomem *addr)
+{
+	u64 ret = __raw_readq(addr);
+	mb();
+	return ret;
+}
+
+extern inline void writel(u32 b, volatile void __iomem *addr)
+{
+	__raw_writel(b, addr);
+	mb();
+}
+
+extern inline void writeq(u64 b, volatile void __iomem *addr)
+{
+	__raw_writeq(b, addr);
+	mb();
+}
+#endif
+
+#define inb_p		inb
+#define inw_p		inw
+#define inl_p		inl
+#define outb_p		outb
+#define outw_p		outw
+#define outl_p		outl
+#define readb_relaxed(addr) __raw_readb(addr)
+#define readw_relaxed(addr) __raw_readw(addr)
+#define readl_relaxed(addr) __raw_readl(addr)
+#define readq_relaxed(addr) __raw_readq(addr)
+
+#define mmiowb()
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *, const volatile void __iomem *, long);
+extern void memcpy_toio(volatile void __iomem *, const void *, long);
+extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
+
+static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
+{
+	_memset_c_io(addr, 0x0101010101010101UL * c, len);
+}
+
+#define __HAVE_ARCH_MEMSETW_IO
+static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
+{
+	_memset_c_io(addr, 0x0001000100010001UL * c, len);
+}
+
+/*
+ * String versions of in/out ops:
+ */
+extern void insb (unsigned long port, void *dst, unsigned long count);
+extern void insw (unsigned long port, void *dst, unsigned long count);
+extern void insl (unsigned long port, void *dst, unsigned long count);
+extern void outsb (unsigned long port, const void *src, unsigned long count);
+extern void outsw (unsigned long port, const void *src, unsigned long count);
+extern void outsl (unsigned long port, const void *src, unsigned long count);
+
+/*
+ * XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and 
+ * just copy it. The net code will then do the checksum later. Presently 
+ * only used by some shared memory 8390 Ethernet cards anyway.
+ */
+
+#define eth_io_copy_and_sum(skb,src,len,unused) \
+  memcpy_fromio((skb)->data,src,len)
+
+#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
+  isa_memcpy_fromio((skb)->data,src,len)
+
+static inline int
+check_signature(const volatile void __iomem *io_addr,
+		const unsigned char *signature, int length)
+{
+	do {
+		if (readb(io_addr) != *signature)
+			return 0;
+		io_addr++;
+		signature++;
+	} while (--length);
+	return 1;
+}
+
+
+/*
+ * ISA space is mapped to some machine-specific location on Alpha.
+ * Call into the existing hooks to get the address translated.
+ */
+
+static inline u8
+isa_readb(unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 1);
+	u8 ret = readb(addr);
+	iounmap(addr);
+	return ret;
+}
+
+static inline u16
+isa_readw(unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 2);
+	u16 ret = readw(addr);
+	iounmap(addr);
+	return ret;
+}
+
+static inline u32
+isa_readl(unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 4);
+	u32 ret = readl(addr);
+	iounmap(addr);
+	return ret;
+}
+
+static inline void
+isa_writeb(u8 b, unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 1);
+	writeb(b, addr);
+	iounmap(addr);
+}
+
+static inline void
+isa_writew(u16 w, unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 2);
+	writew(w, addr);
+	iounmap(addr);
+}
+
+static inline void
+isa_writel(u32 l, unsigned long offset)
+{
+	void __iomem *addr = ioremap(offset, 4);
+	writel(l, addr);
+	iounmap(addr);
+}
+
+static inline void
+isa_memset_io(unsigned long offset, u8 val, long n)
+{
+	void __iomem *addr = ioremap(offset, n);
+	memset_io(addr, val, n);
+	iounmap(addr);
+}
+
+static inline void
+isa_memcpy_fromio(void *dest, unsigned long offset, long n)
+{
+	void __iomem *addr = ioremap(offset, n);
+	memcpy_fromio(dest, addr, n);
+	iounmap(addr);
+}
+
+static inline void
+isa_memcpy_toio(unsigned long offset, const void *src, long n)
+{
+	void __iomem *addr = ioremap(offset, n);
+	memcpy_toio(addr, src, n);
+	iounmap(addr);
+}
+
+/*
+ * The Alpha Jensen hardware for some rather strange reason puts
+ * the RTC at 0x170 instead of 0x70. Probably due to some
+ * misguided idea about using 0x70 for NMI stuff.
+ *
+ * These defines will override the defaults when doing RTC queries
+ */
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
+#else
+# ifdef CONFIG_ALPHA_JENSEN
+#  define RTC_PORT(x)	(0x170+(x))
+# else
+#  define RTC_PORT(x)	(0x70 + (x))
+# endif
+#endif
+#define RTC_ALWAYS_BCD	0
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size)		do { } while (0)
+#define dma_cache_wback(_start,_size)		do { } while (0)
+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+
+/*
+ * Some mucking forons use if[n]def writeq to check if the platform has it.
+ * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
+ * to play with; for now just use cpp anti-recursion logic and make sure
+ * that damn thing is defined and expands to itself.
+ */
+
+#define writeq writeq
+#define readq readq
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_IO_H */
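One detail above deserves a second look: memset_io() and memsetw_io() widen
the fill value to a full quadword with a single multiply, so _memset_c_io()
can store 64 bits per iteration.  The trick in isolation (output shown for a
64-bit host):

#include <stdio.h>

int main(void)
{
	unsigned char c = 0xab;
	unsigned short w = 0x1234;

	/* Multiplying by a mask of repeated 0x01 (or 0x0001) bytes
	   replicates the value into every lane of the quadword. */
	printf("%#018lx\n", 0x0101010101010101UL * c);	/* 0xabab...ab */
	printf("%#018lx\n", 0x0001000100010001UL * w);	/* 0x1234...1234 */
	return 0;
}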
diff --git a/include/asm-alpha/io_trivial.h b/include/asm-alpha/io_trivial.h
new file mode 100644
index 0000000..b10d1aa
--- /dev/null
+++ b/include/asm-alpha/io_trivial.h
@@ -0,0 +1,127 @@
+/* Trivial implementations of basic i/o routines.  These assume that all
+   of the hard work has been done by ioremap and ioportmap, and that
+   access to i/o space is linear.  */
+
+/* This file may be included multiple times.  */
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+__EXTERN_INLINE unsigned int
+IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
+{
+	return __kernel_ldbu(*(volatile u8 __force *)a);
+}
+
+__EXTERN_INLINE unsigned int
+IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
+{
+	return __kernel_ldwu(*(volatile u16 __force *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a)
+{
+	__kernel_stb(b, *(volatile u8 __force *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
+{
+	__kernel_stw(b, *(volatile u16 __force *)a);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+__EXTERN_INLINE unsigned int
+IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
+{
+	return *(volatile u32 __force *)a;
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a)
+{
+	*(volatile u32 __force *)a = b;
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
+__EXTERN_INLINE u8
+IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
+{
+	return __kernel_ldbu(*(const volatile u8 __force *)a);
+}
+
+__EXTERN_INLINE u16
+IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
+{
+	return __kernel_ldwu(*(const volatile u16 __force *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
+{
+	__kernel_stb(b, *(volatile u8 __force *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
+{
+	__kernel_stw(b, *(volatile u16 __force *)a);
+}
+#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2
+__EXTERN_INLINE u8
+IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
+{
+	return IO_CONCAT(__IO_PREFIX,ioread8)((void __iomem *)a);
+}
+
+__EXTERN_INLINE u16
+IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
+{
+	return IO_CONCAT(__IO_PREFIX,ioread16)((void __iomem *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite8)(b, (void __iomem *)a);
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite16)(b, (void __iomem *)a);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
+__EXTERN_INLINE u32
+IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a)
+{
+	return *(const volatile u32 __force *)a;
+}
+
+__EXTERN_INLINE u64
+IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a)
+{
+	return *(const volatile u64 __force *)a;
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a)
+{
+	*(volatile u32 __force *)a = b;
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a)
+{
+	*(volatile u64 __force *)a = b;
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_iounmap)
+__EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a)
+{
+}
+#endif
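Everything in this file hinges on the two-level IO_CONCAT()/_IO_CONCAT()
paste from io.h: the extra level forces __IO_PREFIX to be macro-expanded
before ## glues the tokens.  A stand-alone illustration (the prefix and the
function body here are stand-ins):

#include <stdio.h>

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b
#define __IO_PREFIX	jensen

static void jensen_iounmap(void)
{
	puts("jensen_iounmap called");
}

int main(void)
{
	/* Expands to jensen_iounmap(); a single-level paste would have
	   produced __IO_PREFIX_iounmap instead. */
	IO_CONCAT(__IO_PREFIX, iounmap)();
	return 0;
}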
diff --git a/include/asm-alpha/ioctl.h b/include/asm-alpha/ioctl.h
new file mode 100644
index 0000000..fc63727
--- /dev/null
+++ b/include/asm-alpha/ioctl.h
@@ -0,0 +1,66 @@
+#ifndef _ALPHA_IOCTL_H
+#define _ALPHA_IOCTL_H
+
+/*
+ * The original linux ioctl numbering scheme was just a general
+ * "anything goes" setup, where more or less random numbers were
+ * assigned.  Sorry, I was clueless when I started out on this.
+ *
+ * On the alpha, we'll try to clean it up a bit, using a more sane
+ * ioctl numbering, and also trying to be compatible with OSF/1 in
+ * the process. I'd like to clean it up for the i386 as well, but
+ * it's so painful recognizing both the new and the old numbers..
+ */
+
+#define _IOC_NRBITS	8
+#define _IOC_TYPEBITS	8
+#define _IOC_SIZEBITS	13
+#define _IOC_DIRBITS	3
+
+#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT	0
+#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits: _IOC_NONE could be 0, but OSF/1 gives it a bit.
+ * And this turns out to be useful for catching old ioctl numbers
+ * in header files.
+ */
+#define _IOC_NONE	1U
+#define _IOC_READ	2U
+#define _IOC_WRITE	4U
+
+#define _IOC(dir,type,nr,size)			\
+	((unsigned int)				\
+	 (((dir)  << _IOC_DIRSHIFT) |		\
+	  ((type) << _IOC_TYPESHIFT) |		\
+	  ((nr)   << _IOC_NRSHIFT) |		\
+	  ((size) << _IOC_SIZESHIFT)))
+
+/* used to create numbers */
+#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode them.. */
+#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
+
+#endif /* _ALPHA_IOCTL_H */
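With 8 nr bits, 8 type bits, 13 size bits and 3 direction bits, the shifts
come out to 0, 8, 16 and 29.  Packing FIONREAD (defined as _IOR('f', 127, int)
in ioctls.h below) by hand, as a check on the scheme:

#include <stdio.h>

/* Same packing as _IOC() above: dir<<29 | size<<16 | type<<8 | nr. */
int main(void)
{
	unsigned int nr = (2U << 29)		/* _IOC_READ */
			| (sizeof(int) << 16)	/* size = 4 */
			| ('f' << 8)		/* type */
			| 127;			/* nr */

	printf("FIONREAD = %#x\n", nr);		/* prints 0x4004667f */
	printf("dir=%u type='%c' nr=%u size=%u\n",
	       nr >> 29, (nr >> 8) & 0xff, nr & 0xff, (nr >> 16) & 0x1fff);
	return 0;
}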
diff --git a/include/asm-alpha/ioctls.h b/include/asm-alpha/ioctls.h
new file mode 100644
index 0000000..67bb9f6
--- /dev/null
+++ b/include/asm-alpha/ioctls.h
@@ -0,0 +1,112 @@
+#ifndef _ASM_ALPHA_IOCTLS_H
+#define _ASM_ALPHA_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX		_IO('f', 1)
+#define FIONCLEX	_IO('f', 2)
+#define FIOASYNC	_IOW('f', 125, int)
+#define FIONBIO		_IOW('f', 126, int)
+#define FIONREAD	_IOR('f', 127, int)
+#define TIOCINQ		FIONREAD
+#define FIOQSIZE	_IOR('f', 128, loff_t)
+
+#define TIOCGETP	_IOR('t', 8, struct sgttyb)
+#define TIOCSETP	_IOW('t', 9, struct sgttyb)
+#define TIOCSETN	_IOW('t', 10, struct sgttyb)	/* TIOCSETP w/o flush */
+
+#define TIOCSETC	_IOW('t', 17, struct tchars)
+#define TIOCGETC	_IOR('t', 18, struct tchars)
+#define TCGETS		_IOR('t', 19, struct termios)
+#define TCSETS		_IOW('t', 20, struct termios)
+#define TCSETSW		_IOW('t', 21, struct termios)
+#define TCSETSF		_IOW('t', 22, struct termios)
+
+#define TCGETA		_IOR('t', 23, struct termio)
+#define TCSETA		_IOW('t', 24, struct termio)
+#define TCSETAW		_IOW('t', 25, struct termio)
+#define TCSETAF		_IOW('t', 28, struct termio)
+
+#define TCSBRK		_IO('t', 29)
+#define TCXONC		_IO('t', 30)
+#define TCFLSH		_IO('t', 31)
+
+#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
+#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
+#define	TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
+#define	TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
+#define TIOCOUTQ        _IOR('t', 115, int)     /* output queue size */
+
+#define TIOCGLTC	_IOR('t', 116, struct ltchars)
+#define TIOCSLTC	_IOW('t', 117, struct ltchars)
+#define TIOCSPGRP	_IOW('t', 118, int)
+#define TIOCGPGRP	_IOR('t', 119, int)
+
+#define TIOCEXCL	0x540C
+#define TIOCNXCL	0x540D
+#define TIOCSCTTY	0x540E
+
+#define TIOCSTI		0x5412
+#define TIOCMGET	0x5415
+#define TIOCMBIS	0x5416
+#define TIOCMBIC	0x5417
+#define TIOCMSET	0x5418
+# define TIOCM_LE	0x001
+# define TIOCM_DTR	0x002
+# define TIOCM_RTS	0x004
+# define TIOCM_ST	0x008
+# define TIOCM_SR	0x010
+# define TIOCM_CTS	0x020
+# define TIOCM_CAR	0x040
+# define TIOCM_RNG	0x080
+# define TIOCM_DSR	0x100
+# define TIOCM_CD	TIOCM_CAR
+# define TIOCM_RI	TIOCM_RNG
+# define TIOCM_OUT1	0x2000
+# define TIOCM_OUT2	0x4000
+# define TIOCM_LOOP	0x8000
+
+#define TIOCGSOFTCAR	0x5419
+#define TIOCSSOFTCAR	0x541A
+#define TIOCLINUX	0x541C
+#define TIOCCONS	0x541D
+#define TIOCGSERIAL	0x541E
+#define TIOCSSERIAL	0x541F
+#define TIOCPKT		0x5420
+# define TIOCPKT_DATA		 0
+# define TIOCPKT_FLUSHREAD	 1
+# define TIOCPKT_FLUSHWRITE	 2
+# define TIOCPKT_STOP		 4
+# define TIOCPKT_START		 8
+# define TIOCPKT_NOSTOP		16
+# define TIOCPKT_DOSTOP		32
+
+
+#define TIOCNOTTY	0x5422
+#define TIOCSETD	0x5423
+#define TIOCGETD	0x5424
+#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK	0x5427  /* BSD compatibility */
+#define TIOCCBRK	0x5428  /* BSD compatibility */
+#define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+
+#define TIOCSERCONFIG	0x5453
+#define TIOCSERGWILD	0x5454
+#define TIOCSERSWILD	0x5455
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
+#define TIOCSERGETLSR   0x5459 /* Get line status register */
+  /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+#define TIOCGHAYESESP	0x545E  /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP	0x545F  /* Set Hayes ESP configuration */
+
+#endif /* _ASM_ALPHA_IOCTLS_H */
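For reference, the typical consumer side of one of these numbers looks like
this (ordinary user-space code, nothing alpha-specific about it):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	/* TIOCGWINSZ reads the terminal size into a struct winsize. */
	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
		printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	else
		perror("TIOCGWINSZ");
	return 0;
}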
diff --git a/include/asm-alpha/ipcbuf.h b/include/asm-alpha/ipcbuf.h
new file mode 100644
index 0000000..d9c0e1a
--- /dev/null
+++ b/include/asm-alpha/ipcbuf.h
@@ -0,0 +1,28 @@
+#ifndef _ALPHA_IPCBUF_H
+#define _ALPHA_IPCBUF_H
+
+/* 
+ * The ipc64_perm structure for alpha architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit seq
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t	key;
+	__kernel_uid_t	uid;
+	__kernel_gid_t	gid;
+	__kernel_uid_t	cuid;
+	__kernel_gid_t	cgid;
+	__kernel_mode_t	mode; 
+	unsigned short	seq;
+	unsigned short	__pad1;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+};
+
+#endif /* _ALPHA_IPCBUF_H */
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
new file mode 100644
index 0000000..566db72
--- /dev/null
+++ b/include/asm-alpha/irq.h
@@ -0,0 +1,100 @@
+#ifndef _ALPHA_IRQ_H
+#define _ALPHA_IRQ_H
+
+/*
+ *	linux/include/asm-alpha/irq.h
+ *
+ *	(C) 1994 Linus Torvalds
+ */
+
+#include <linux/linkage.h>
+#include <linux/config.h>
+
+#if   defined(CONFIG_ALPHA_GENERIC)
+
+/* Here NR_IRQS is not exact, but rather an upper bound.  This is used
+   many places throughout the kernel to size static arrays.  That's ok,
+   we'll use alpha_mv.nr_irqs when we want the real thing.  */
+
+/* When LEGACY_START_ADDRESS is selected, we leave out:
+     TITAN
+     WILDFIRE
+     MARVEL
+
+   This helps keep the kernel object size reasonable for the majority
+   of machines.
+*/
+
+# if defined(CONFIG_ALPHA_LEGACY_START_ADDRESS)
+#  define NR_IRQS      (128)           /* max is RAWHIDE/TAKARA */
+# else
+#  define NR_IRQS      (32768 + 16)    /* marvel - 32 pids */
+# endif
+
+#elif defined(CONFIG_ALPHA_CABRIOLET) || \
+      defined(CONFIG_ALPHA_EB66P)     || \
+      defined(CONFIG_ALPHA_EB164)     || \
+      defined(CONFIG_ALPHA_PC164)     || \
+      defined(CONFIG_ALPHA_LX164)
+# define NR_IRQS	35
+
+#elif defined(CONFIG_ALPHA_EB66)      || \
+      defined(CONFIG_ALPHA_EB64P)     || \
+      defined(CONFIG_ALPHA_MIKASA)
+# define NR_IRQS	32
+
+#elif defined(CONFIG_ALPHA_ALCOR)     || \
+      defined(CONFIG_ALPHA_MIATA)     || \
+      defined(CONFIG_ALPHA_RUFFIAN)   || \
+      defined(CONFIG_ALPHA_RX164)     || \
+      defined(CONFIG_ALPHA_NORITAKE)
+# define NR_IRQS	48
+
+#elif defined(CONFIG_ALPHA_SABLE)     || \
+      defined(CONFIG_ALPHA_SX164)
+# define NR_IRQS	40
+
+#elif defined(CONFIG_ALPHA_DP264) || \
+      defined(CONFIG_ALPHA_LYNX)  || \
+      defined(CONFIG_ALPHA_SHARK) || \
+      defined(CONFIG_ALPHA_EIGER)
+# define NR_IRQS	64
+
+#elif defined(CONFIG_ALPHA_TITAN)
+#define NR_IRQS		80
+
+#elif defined(CONFIG_ALPHA_RAWHIDE) || \
+	defined(CONFIG_ALPHA_TAKARA)
+# define NR_IRQS	128
+
+#elif defined(CONFIG_ALPHA_WILDFIRE)
+# define NR_IRQS	2048 /* enough for 8 QBBs */
+
+#elif defined(CONFIG_ALPHA_MARVEL)
+# define NR_IRQS	(32768 + 16) 	/* marvel - 32 pids*/
+
+#else /* everyone else */
+# define NR_IRQS	16
+#endif
+
+static __inline__ int irq_canonicalize(int irq)
+{
+	/*
+	 * XXX is this true for all Alphas?  The old serial driver
+	 * did it this way for years without any complaints, so....
+	 */
+	return ((irq == 2) ? 9 : irq);
+}
+
+extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
+extern void enable_irq(unsigned int);
+
+struct pt_regs;
+extern void (*perf_irq)(unsigned long, struct pt_regs *);
+
+struct irqaction;
+int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
+
+
+#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
new file mode 100644
index 0000000..964b06e
--- /dev/null
+++ b/include/asm-alpha/jensen.h
@@ -0,0 +1,346 @@
+#ifndef __ALPHA_JENSEN_H
+#define __ALPHA_JENSEN_H
+
+#include <asm/compiler.h>
+
+/*
+ * Defines for the AlphaPC EISA IO and memory address space.
+ */
+
+/*
+ * NOTE! The memory operations do not set any memory barriers, as it's
+ * not needed for cases like a frame buffer that is essentially memory-like.
+ * You need to do them by hand if the operations depend on ordering.
+ *
+ * Similarly, the port IO operations do a "mb" only after a write operation:
+ * if an mb is needed before (as in the case of doing memory mapped IO
+ * first, and then a port IO operation to the same device), it needs to be
+ * done by hand.
+ *
+ * After the above has bitten me 100 times, I'll give up and just do the
+ * mb all the time, but right now I'm hoping this will work out.  Avoiding
+ * mb's may potentially be a noticeable speed improvement, but I can't
+ * honestly say I've tested it.
+ *
+ * Handling interrupts that need to do mb's to synchronize to non-interrupts
+ * is another fun race area.  Don't do it (because if you do, I'll have to
+ * do *everything* with interrupts disabled, ugh).
+ */
+
+/*
+ * EISA Interrupt Acknowledge address
+ */
+#define EISA_INTA		(IDENT_ADDR + 0x100000000UL)
+
+/*
+ * FEPROM addresses
+ */
+#define EISA_FEPROM0		(IDENT_ADDR + 0x180000000UL)
+#define EISA_FEPROM1		(IDENT_ADDR + 0x1A0000000UL)
+
+/*
+ * VL82C106 base address
+ */
+#define EISA_VL82C106		(IDENT_ADDR + 0x1C0000000UL)
+
+/*
+ * EISA "Host Address Extension" address (bits 25-31 of the EISA address)
+ */
+#define EISA_HAE		(IDENT_ADDR + 0x1D0000000UL)
+
+/*
+ * "SYSCTL" register address
+ */
+#define EISA_SYSCTL		(IDENT_ADDR + 0x1E0000000UL)
+
+/*
+ * "spare" register address
+ */
+#define EISA_SPARE		(IDENT_ADDR + 0x1F0000000UL)
+
+/*
+ * EISA memory address offset
+ */
+#define EISA_MEM		(IDENT_ADDR + 0x200000000UL)
+
+/*
+ * EISA IO address offset
+ */
+#define EISA_IO			(IDENT_ADDR + 0x300000000UL)
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * Handle the "host address register". This needs to be set
+ * to the high 7 bits of the EISA address.  This is also needed
+ * for EISA IO addresses, which are only 16 bits wide (the
+ * hae needs to be set to 0).
+ *
+ * HAE isn't needed for the local IO operations, though.
+ */
+
+#define JENSEN_HAE_ADDRESS	EISA_HAE
+#define JENSEN_HAE_MASK		0x1ffffff
+
+__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
+{
+	/* hae on the Jensen is bits 31:25 shifted right */
+	addr >>= 25;
+	if (addr != alpha_mv.hae_cache)
+		set_hae(addr);
+}
+
+#define vuip	volatile unsigned int *
+
+/*
+ * IO functions
+ *
+ * The "local" functions are those that don't go out to the EISA bus,
+ * but instead act on the VL82C106 chip directly.. This is mainly the
+ * keyboard, RTC, printer and the first two serial lines..
+ *
+ * The local stuff makes for some complications, but it seems to be
+ * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
+ * convinced that I need one of the newer machines.
+ */
+
+static inline unsigned int jensen_local_inb(unsigned long addr)
+{
+	return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
+}
+
+static inline void jensen_local_outb(u8 b, unsigned long addr)
+{
+	*(vuip)((addr << 9) + EISA_VL82C106) = b;
+	mb();
+}
+
+static inline unsigned int jensen_bus_inb(unsigned long addr)
+{
+	long result;
+
+	jensen_set_hae(0);
+	result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
+	return __kernel_extbl(result, addr & 3);
+}
+
+static inline void jensen_bus_outb(u8 b, unsigned long addr)
+{
+	jensen_set_hae(0);
+	*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
+	mb();
+}
+
+/*
+ * It seems gcc is not very good at optimizing away logical
+ * operations that span inline function boundaries, which is
+ * why this is a macro.
+ */
+
+#define jensen_is_local(addr) ( \
+/* keyboard */	(addr == 0x60 || addr == 0x64) || \
+/* RTC */	(addr == 0x170 || addr == 0x171) || \
+/* mb COM2 */	(addr >= 0x2f8 && addr <= 0x2ff) || \
+/* mb LPT1 */	(addr >= 0x3bc && addr <= 0x3be) || \
+/* mb COM1 */	(addr >= 0x3f8 && addr <= 0x3ff))
+
+__EXTERN_INLINE u8 jensen_inb(unsigned long addr)
+{
+	if (jensen_is_local(addr))
+		return jensen_local_inb(addr);
+	else
+		return jensen_bus_inb(addr);
+}
+
+__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr)
+{
+	if (jensen_is_local(addr))
+		jensen_local_outb(b, addr);
+	else
+		jensen_bus_outb(b, addr);
+}
+
+__EXTERN_INLINE u16 jensen_inw(unsigned long addr)
+{
+	long result;
+
+	jensen_set_hae(0);
+	result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
+	result >>= (addr & 3) * 8;
+	return 0xffffUL & result;
+}
+
+__EXTERN_INLINE u32 jensen_inl(unsigned long addr)
+{
+	jensen_set_hae(0);
+	return *(vuip) ((addr << 7) + EISA_IO + 0x60);
+}
+
+__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr)
+{
+	jensen_set_hae(0);
+	*(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
+	mb();
+}
+
+__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
+{
+	jensen_set_hae(0);
+	*(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
+	mb();
+}
+
+/*
+ * Memory functions.
+ */
+
+__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	long result;
+
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
+	result >>= (addr & 3) * 8;
+	return 0xffUL & result;
+}
+
+__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	long result;
+
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
+	result >>= (addr & 3) * 8;
+	return 0xffffUL & result;
+}
+
+__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
+}
+
+__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	unsigned long r0, r1;
+
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	addr = (addr << 7) + EISA_MEM + 0x60;
+	r0 = *(vuip) (addr);
+	r1 = *(vuip) (addr + (4 << 7));
+	return r1 << 32 | r0;
+}
+
+__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	*(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
+}
+
+__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	*(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
+}
+
+__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	*(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
+}
+
+__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	jensen_set_hae(addr);
+	addr &= JENSEN_HAE_MASK;
+	addr = (addr << 7) + EISA_MEM + 0x60;
+	*(vuip) (addr) = b;
+	*(vuip) (addr + (4 << 7)) = b >> 32;
+}
+
+__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
+{
+	return (void __iomem *)addr;
+}
+
+__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
+					     unsigned long size)
+{
+	return (void __iomem *)(addr + 0x100000000ul);
+}
+
+__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
+{
+	return (long)addr >= 0;
+}
+
+__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
+{
+	return (unsigned long)addr >= 0x100000000ul;
+}
+
+/* New-style ioread interface.  All the routines are so ugly for Jensen
+   that it doesn't make sense to merge them.  */
+
+#define IOPORT(OS, NS)							\
+__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr)	\
+{									\
+	if (jensen_is_mmio(xaddr))					\
+		return jensen_read##OS(xaddr - 0x100000000ul);		\
+	else								\
+		return jensen_in##OS((unsigned long)xaddr);		\
+}									\
+__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr)	\
+{									\
+	if (jensen_is_mmio(xaddr))					\
+		jensen_write##OS(b, xaddr - 0x100000000ul);		\
+	else								\
+		jensen_out##OS(b, (unsigned long)xaddr);		\
+}
+
+IOPORT(b, 8)
+IOPORT(w, 16)
+IOPORT(l, 32)
+
+#undef IOPORT
+
+#undef vuip
+
+#undef __IO_PREFIX
+#define __IO_PREFIX		jensen
+#define jensen_trivial_rw_bw	0
+#define jensen_trivial_rw_lq	0
+#define jensen_trivial_io_bw	0
+#define jensen_trivial_io_lq	0
+#define jensen_trivial_iounmap	1
+#include <asm/io_trivial.h>
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_JENSEN_H */
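The address arithmetic above is the usual Alpha sparse-space encoding: the
EISA address is shifted left 7 bits, a size code is added (0x00 byte, 0x20
word, 0x60 longword), and byte/word data travels in the lane selected by the
low address bits.  Purely to make the numbers concrete (EISA_IO is shown here
as its offset from IDENT_ADDR):

#include <stdio.h>

#define EISA_IO_OFF	0x300000000UL	/* EISA_IO - IDENT_ADDR, per above */

int main(void)
{
	unsigned long port = 0x3f8;	/* COM1 data register */

	printf("byte access: %#lx\n", (port << 7) + EISA_IO_OFF + 0x00);
	printf("word access: %#lx\n", (port << 7) + EISA_IO_OFF + 0x20);
	printf("long access: %#lx\n", (port << 7) + EISA_IO_OFF + 0x60);
	return 0;
}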
diff --git a/include/asm-alpha/kmap_types.h b/include/asm-alpha/kmap_types.h
new file mode 100644
index 0000000..3d10cd3
--- /dev/null
+++ b/include/asm-alpha/kmap_types.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/* Dummy header just to define km_type. */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define D(n) __KM_FENCE_##n ,
+#else
+# define D(n)
+#endif
+
+enum km_type {
+D(0)	KM_BOUNCE_READ,
+D(1)	KM_SKB_SUNRPC_DATA,
+D(2)	KM_SKB_DATA_SOFTIRQ,
+D(3)	KM_USER0,
+D(4)	KM_USER1,
+D(5)	KM_BIO_SRC_IRQ,
+D(6)	KM_BIO_DST_IRQ,
+D(7)	KM_PTE0,
+D(8)	KM_PTE1,
+D(9)	KM_IRQ0,
+D(10)	KM_IRQ1,
+D(11)	KM_SOFTIRQ0,
+D(12)	KM_SOFTIRQ1,
+D(13)	KM_TYPE_NR
+};
+
+#undef D
+
+#endif
diff --git a/include/asm-alpha/linkage.h b/include/asm-alpha/linkage.h
new file mode 100644
index 0000000..291c2d0
--- /dev/null
+++ b/include/asm-alpha/linkage.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+/* Nothing to see here... */
+
+#endif
diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h
new file mode 100644
index 0000000..90a510f
--- /dev/null
+++ b/include/asm-alpha/local.h
@@ -0,0 +1,40 @@
+#ifndef _ALPHA_LOCAL_H
+#define _ALPHA_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+typedef atomic64_t local_t;
+
+#define LOCAL_INIT(i)	ATOMIC64_INIT(i)
+#define local_read(v)	atomic64_read(v)
+#define local_set(v,i)	atomic64_set(v,i)
+
+#define local_inc(v)	atomic64_inc(v)
+#define local_dec(v)	atomic64_dec(v)
+#define local_add(i, v)	atomic64_add(i, v)
+#define local_sub(i, v)	atomic64_sub(i, v)
+
+#define __local_inc(v)		((v)->counter++)
+#define __local_dec(v)		((v)->counter--)
+#define __local_add(i,v)	((v)->counter+=(i))
+#define __local_sub(i,v)	((v)->counter-=(i))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations.  Note they take
+ * a variable, not an address.
+ */
+#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
+
+#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
+#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
+#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
+#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
+
+#endif /* _ALPHA_LOCAL_H */
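The split above is deliberate: the local_* ops map to the atomic64_*
primitives, while the __local_* variants are plain read-modify-write and are
only safe where nothing can interleave (e.g. with interrupts off).  A rough
user-space analogy using C11 atomics, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_long safe = 0;	/* behaves like local_t with local_inc() */
	long cheap = 0;		/* behaves like the __local_* variants */

	atomic_fetch_add(&safe, 1);	/* safe against concurrent updates */
	cheap++;			/* caller must rule out races itself */

	printf("%ld %ld\n", atomic_load(&safe), cheap);
	return 0;
}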
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
new file mode 100644
index 0000000..ece166a
--- /dev/null
+++ b/include/asm-alpha/machvec.h
@@ -0,0 +1,136 @@
+#ifndef __ALPHA_MACHVEC_H
+#define __ALPHA_MACHVEC_H 1
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/*
+ *	This file gets pulled in by asm/io.h from user space. We don't
+ *	want most of this escaping.
+ */
+ 
+#ifdef __KERNEL__
+
+/* The following structure vectors all of the I/O and IRQ manipulation
+   from the generic kernel to the hardware specific backend.  */
+
+struct task_struct;
+struct mm_struct;
+struct pt_regs;
+struct vm_area_struct;
+struct linux_hose_info;
+struct pci_dev;
+struct pci_ops;
+struct pci_controller;
+struct _alpha_agp_info;
+
+struct alpha_machine_vector
+{
+	/* This "belongs" down below with the rest of the runtime
+	   variables, but it is convenient for entry.S if these 
+	   two slots are at the beginning of the struct.  */
+	unsigned long hae_cache;
+	unsigned long *hae_register;
+
+	int nr_irqs;
+	int rtc_port;
+	unsigned int max_asn;
+	unsigned long max_isa_dma_address;
+	unsigned long irq_probe_mask;
+	unsigned long iack_sc;
+	unsigned long min_io_address;
+	unsigned long min_mem_address;
+	unsigned long pci_dac_offset;
+
+	void (*mv_pci_tbi)(struct pci_controller *hose,
+			   dma_addr_t start, dma_addr_t end);
+
+	unsigned int (*mv_ioread8)(void __iomem *);
+	unsigned int (*mv_ioread16)(void __iomem *);
+	unsigned int (*mv_ioread32)(void __iomem *);
+
+	void (*mv_iowrite8)(u8, void __iomem *);
+	void (*mv_iowrite16)(u16, void __iomem *);
+	void (*mv_iowrite32)(u32, void __iomem *);
+
+	u8 (*mv_readb)(const volatile void __iomem *);
+	u16 (*mv_readw)(const volatile void __iomem *);
+	u32 (*mv_readl)(const volatile void __iomem *);
+	u64 (*mv_readq)(const volatile void __iomem *);
+
+	void (*mv_writeb)(u8, volatile void __iomem *);
+	void (*mv_writew)(u16, volatile void __iomem *);
+	void (*mv_writel)(u32, volatile void __iomem *);
+	void (*mv_writeq)(u64, volatile void __iomem *);
+
+	void __iomem *(*mv_ioportmap)(unsigned long);
+	void __iomem *(*mv_ioremap)(unsigned long, unsigned long);
+	void (*mv_iounmap)(volatile void __iomem *);
+	int (*mv_is_ioaddr)(unsigned long);
+	int (*mv_is_mmio)(const volatile void __iomem *);
+
+	void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
+			     struct task_struct *);
+	void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
+
+	void (*mv_flush_tlb_current)(struct mm_struct *);
+	void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
+					  struct vm_area_struct *vma,
+					  unsigned long addr);
+
+	void (*update_irq_hw)(unsigned long, unsigned long, int);
+	void (*ack_irq)(unsigned long);
+	void (*device_interrupt)(unsigned long vector, struct pt_regs *regs);
+	void (*machine_check)(u64 vector, u64 la, struct pt_regs *regs);
+
+	void (*smp_callin)(void);
+	void (*init_arch)(void);
+	void (*init_irq)(void);
+	void (*init_rtc)(void);
+	void (*init_pci)(void);
+	void (*kill_arch)(int);
+
+	u8 (*pci_swizzle)(struct pci_dev *, u8 *);
+	int (*pci_map_irq)(struct pci_dev *, u8, u8);
+	struct pci_ops *pci_ops;
+
+	struct _alpha_agp_info *(*agp_info)(void);
+
+	const char *vector_name;
+
+	/* NUMA information */
+	int (*pa_to_nid)(unsigned long);
+	int (*cpuid_to_nid)(int);
+	unsigned long (*node_mem_start)(int);
+	unsigned long (*node_mem_size)(int);
+
+	/* System specific parameters.  */
+	union {
+	    struct {
+		unsigned long gru_int_req_bits;
+	    } cia;
+
+	    struct {
+		unsigned long gamma_bias;
+	    } t2;
+
+	    struct {
+		unsigned int route_tab;
+	    } sio;
+	} sys;
+};
+
+extern struct alpha_machine_vector alpha_mv;
+
+#ifdef CONFIG_ALPHA_GENERIC
+extern int alpha_using_srm;
+#else
+#ifdef CONFIG_ALPHA_SRM
+#define alpha_using_srm 1
+#else
+#define alpha_using_srm 0
+#endif
+#endif /* GENERIC */
+
+#endif
+#endif /* __ALPHA_MACHVEC_H */
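The pattern here is plain indirection: the generic kernel calls through the
single alpha_mv vector and each platform fills the slots in (the sys_*.c
files do this with designated initializers).  A toy model of the dispatch,
with invented names throughout:

#include <stdio.h>

struct mini_mv {
	const char *vector_name;
	unsigned int (*mv_ioread8)(unsigned long);
};

static unsigned int demo_ioread8(unsigned long addr)
{
	return (unsigned int)(addr & 0xff);	/* stand-in for real MMIO */
}

/* One global vector, as with alpha_mv above. */
static struct mini_mv mv = { "demo", demo_ioread8 };

static unsigned int ioread8(unsigned long a)
{
	return mv.mv_ioread8(a);	/* cf. the REMAP1() wrappers in io.h */
}

int main(void)
{
	printf("%s: %#x\n", mv.vector_name, ioread8(0x1abc));
	return 0;
}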
diff --git a/include/asm-alpha/mc146818rtc.h b/include/asm-alpha/mc146818rtc.h
new file mode 100644
index 0000000..097703f
--- /dev/null
+++ b/include/asm-alpha/mc146818rtc.h
@@ -0,0 +1,27 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef __ASM_ALPHA_MC146818RTC_H
+#define __ASM_ALPHA_MC146818RTC_H
+
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x)	(0x70 + (x))
+#define RTC_ALWAYS_BCD	1	/* RTC register values are always BCD */
+#endif
+
+/*
+ * All machines supported so far access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+#endif /* __ASM_ALPHA_MC146818RTC_H */
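RTC_ALWAYS_BCD tells the RTC driver it can skip probing register B and treat
every value read through CMOS_READ() as BCD.  The conversion itself is two
lines, shown stand-alone:

#include <stdio.h>

/* BCD -> binary, as the RTC driver does with values read via CMOS_READ(). */
static unsigned int bcd2bin(unsigned char bcd)
{
	return (bcd & 0x0f) + (bcd >> 4) * 10;
}

int main(void)
{
	printf("%u\n", bcd2bin(0x59));	/* prints 59 */
	return 0;
}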
diff --git a/include/asm-alpha/md.h b/include/asm-alpha/md.h
new file mode 100644
index 0000000..6c9b822
--- /dev/null
+++ b/include/asm-alpha/md.h
@@ -0,0 +1,13 @@
+/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $
+ * md.h: High speed xor_block operation for RAID4/5 
+ *
+ */
+ 
+#ifndef __ASM_MD_H
+#define __ASM_MD_H
+
+/* #define HAVE_ARCH_XORBLOCK */
+
+#define MD_XORBLOCK_ALIGNMENT	sizeof(long)
+
+#endif /* __ASM_MD_H */
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
new file mode 100644
index 0000000..eb9c279
--- /dev/null
+++ b/include/asm-alpha/mman.h
@@ -0,0 +1,50 @@
+#ifndef __ALPHA_MMAN_H__
+#define __ALPHA_MMAN_H__
+
+#define PROT_READ	0x1		/* page can be read */
+#define PROT_WRITE	0x2		/* page can be written */
+#define PROT_EXEC	0x4		/* page can be executed */
+#define PROT_SEM	0x8		/* page may be used for atomic ops */
+#define PROT_NONE	0x0		/* page can not be accessed */
+#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_TYPE	0x0f		/* Mask for type of mapping (OSF/1 is _wrong_) */
+#define MAP_FIXED	0x100		/* Interpret addr exactly */
+#define MAP_ANONYMOUS	0x10		/* don't use a file */
+
+/* not used by linux, but here to make sure we don't clash with OSF/1 defines */
+#define _MAP_HASSEMAPHORE 0x0200
+#define _MAP_INHERIT	0x0400
+#define _MAP_UNALIGNED	0x0800
+
+/* These are linux-specific */
+#define MAP_GROWSDOWN	0x01000		/* stack-like segment */
+#define MAP_DENYWRITE	0x02000		/* ETXTBSY */
+#define MAP_EXECUTABLE	0x04000		/* mark it as an executable */
+#define MAP_LOCKED	0x08000		/* lock the mapping */
+#define MAP_NORESERVE	0x10000		/* don't check for reservations */
+#define MAP_POPULATE	0x20000		/* populate (prefault) pagetables */
+#define MAP_NONBLOCK	0x40000		/* do not block on IO */
+
+#define MS_ASYNC	1		/* sync memory asynchronously */
+#define MS_SYNC		2		/* synchronous memory sync */
+#define MS_INVALIDATE	4		/* invalidate the caches */
+
+#define MCL_CURRENT	 8192		/* lock all currently mapped pages */
+#define MCL_FUTURE	16384		/* lock all additions to address space */
+
+#define MADV_NORMAL	0		/* no further special treatment */
+#define MADV_RANDOM	1		/* expect random page references */
+#define MADV_SEQUENTIAL	2		/* expect sequential page references */
+#define MADV_WILLNEED	3		/* will need these pages */
+#define	MADV_SPACEAVAIL	5		/* ensure resources are available */
+#define MADV_DONTNEED	6		/* don't need these pages */
+
+/* compatibility flags */
+#define MAP_ANON	MAP_ANONYMOUS
+#define MAP_FILE	0
+
+#endif /* __ALPHA_MMAN_H__ */
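The flags above are consumed by ordinary mmap() calls; for instance, an
anonymous private mapping (8192 bytes, the Alpha page size):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Anonymous private mapping: no backing file, zero-filled. */
	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");
	puts(p);
	munmap(p, 8192);
	return 0;
}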
diff --git a/include/asm-alpha/mmu.h b/include/asm-alpha/mmu.h
new file mode 100644
index 0000000..3dc1277
--- /dev/null
+++ b/include/asm-alpha/mmu.h
@@ -0,0 +1,7 @@
+#ifndef __ALPHA_MMU_H
+#define __ALPHA_MMU_H
+
+/* The alpha MMU context is one "unsigned long" context value per CPU */
+typedef unsigned long mm_context_t[NR_CPUS];
+
+#endif
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
new file mode 100644
index 0000000..a714d0c
--- /dev/null
+++ b/include/asm-alpha/mmu_context.h
@@ -0,0 +1,261 @@
+#ifndef __ALPHA_MMU_CONTEXT_H
+#define __ALPHA_MMU_CONTEXT_H
+
+/*
+ * get a new mmu context..
+ *
+ * Copyright (C) 1996, Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <asm/system.h>
+#include <asm/machvec.h>
+#include <asm/compiler.h>
+
+/*
+ * Force a context reload. This is needed when we change the page
+ * table pointer or when we update the ASN of the current process.
+ */
+
+/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
+#ifndef __EXTERN_INLINE
+#include <asm/io.h>
+#endif
+
+
+extern inline unsigned long
+__reload_thread(struct pcb_struct *pcb)
+{
+	register unsigned long a0 __asm__("$16");
+	register unsigned long v0 __asm__("$0");
+
+	a0 = virt_to_phys(pcb);
+	__asm__ __volatile__(
+		"call_pal %2 #__reload_thread"
+		: "=r"(v0), "=r"(a0)
+		: "i"(PAL_swpctx), "r"(a0)
+		: "$1", "$22", "$23", "$24", "$25");
+
+	return v0;
+}
+
+
+/*
+ * The maximum ASN the processor supports.  On the EV4 this is 63
+ * but the PAL-code doesn't actually use this information.  On the
+ * EV5 this is 127, and EV6 has 255.
+ *
+ * On the EV4, the ASNs are more-or-less useless anyway, as they are
+ * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
+ * ASNs also validate the TB entries, and thus make a lot more sense.
+ *
+ * The EV4 ASNs don't even match the architecture manual, ugh.  And
+ * I quote: "If a processor implements address space numbers (ASNs),
+ * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
+ * in use) and the Valid bit set, then entries can also effectively be
+ * made coherent by assigning a new, unused ASN to the currently
+ * running process and not reusing the previous ASN before calling the
+ * appropriate PALcode routine to invalidate the translation buffer (TB)". 
+ *
+ * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
+ * work correctly and thus cannot be used (explaining the lack of PAL-code
+ * support).
+ */
+#define EV4_MAX_ASN 63
+#define EV5_MAX_ASN 127
+#define EV6_MAX_ASN 255
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define MAX_ASN	(alpha_mv.max_asn)
+#else
+# ifdef CONFIG_ALPHA_EV4
+#  define MAX_ASN	EV4_MAX_ASN
+# elif defined(CONFIG_ALPHA_EV5)
+#  define MAX_ASN	EV5_MAX_ASN
+# else
+#  define MAX_ASN	EV6_MAX_ASN
+# endif
+#endif
+
+/*
+ * cpu_last_asn(processor):
+ * 63                                            0
+ * +-------------+----------------+--------------+
+ * | asn version | this processor | hardware asn |
+ * +-------------+----------------+--------------+
+ */
+
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
+#else
+extern unsigned long last_asn;
+#define cpu_last_asn(cpuid)	last_asn
+#endif /* CONFIG_SMP */
+
+#define WIDTH_HARDWARE_ASN	8
+#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
+#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
+
+/*
+ * NOTE! The way this is set up, the high bits of the "asn_cache" (and
+ * the "mm->context") are the ASN _version_ code. A version of 0 is
+ * always considered invalid, so to invalidate another process you only
+ * need to do "p->mm->context = 0".
+ *
+ * If we need more ASNs than the processor has, we invalidate the old
+ * user TLB's (tbiap()) and start a new ASN version. That will automatically
+ * force a new asn for any other processes the next time they want to
+ * run.
+ */
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+static inline unsigned long
+__get_new_mm_context(struct mm_struct *mm, long cpu)
+{
+	unsigned long asn = cpu_last_asn(cpu);
+	unsigned long next = asn + 1;
+
+	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
+		tbiap();
+		imb();
+		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+	}
+	cpu_last_asn(cpu) = next;
+	return next;
+}
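
To make the versioned-ASN scheme concrete, here is a minimal user-space sketch of the same allocation logic, assuming the EV5 constants (WIDTH_HARDWARE_ASN of 8, MAX_ASN of 127); tbiap()/imb() are modelled by a flush counter, so this is illustration only, not kernel code:

#include <stdio.h>

#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)
#define MAX_ASN			127		/* EV5 */

static unsigned long last_asn = ASN_FIRST_VERSION;
static unsigned long tlb_flushes;		/* stands in for tbiap(); imb() */

static unsigned long get_new_mm_context(void)
{
	unsigned long asn = last_asn;
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tlb_flushes++;			/* hardware ASNs exhausted */
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	last_asn = next;
	return next;
}

int main(void)
{
	int i;

	/* 300 allocations with 127 usable ASNs force two version bumps */
	for (i = 0; i < 300; i++)
		get_new_mm_context();
	printf("last_asn=%#lx flushes=%lu\n", last_asn, tlb_flushes);
	return 0;
}

Any context whose version bits no longer match last_asn is then stale, which is exactly the "(mmc ^ asn) & ~HARDWARE_ASN_MASK" test in ev5_switch_mm() below.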
+
+__EXTERN_INLINE void
+ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+	      struct task_struct *next)
+{
+	/* Check if our ASN is of an older version, and thus invalid. */
+	unsigned long asn;
+	unsigned long mmc;
+	long cpu = smp_processor_id();
+
+#ifdef CONFIG_SMP
+	cpu_data[cpu].asn_lock = 1;
+	barrier();
+#endif
+	asn = cpu_last_asn(cpu);
+	mmc = next_mm->context[cpu];
+	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
+		mmc = __get_new_mm_context(next_mm, cpu);
+		next_mm->context[cpu] = mmc;
+	}
+#ifdef CONFIG_SMP
+	else
+		cpu_data[cpu].need_new_asn = 1;
+#endif
+
+	/* Always update the PCB ASN.  Another thread may have allocated
+	   a new mm->context (via flush_tlb_mm) without the ASN serial
+	   number wrapping.  We have no way to detect when this is needed.  */
+	next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
+}
+
+__EXTERN_INLINE void
+ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+	      struct task_struct *next)
+{
+	/* As described, ASN's are broken for TLB usage.  But we can
+	   optimize for switching between threads -- if the mm is
+	   unchanged from current we needn't flush.  */
+	/* ??? May not be needed because EV4 PALcode recognizes that
+	   ASN's are broken and does a tbiap itself on swpctx, under
+	   the "Must set ASN or flush" rule.  At least this is true
+	   for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
+	   I'm going to leave this here anyway, just to Be Sure.  -- r~  */
+	if (prev_mm != next_mm)
+		tbiap();
+
+	/* Do continue to allocate ASNs, because we can still use them
+	   to avoid flushing the icache.  */
+	ev5_switch_mm(prev_mm, next_mm, next);
+}
+
+extern void __load_new_mm_context(struct mm_struct *);
+
+#ifdef CONFIG_SMP
+#define check_mmu_context()					\
+do {								\
+	int cpu = smp_processor_id();				\
+	cpu_data[cpu].asn_lock = 0;				\
+	barrier();						\
+	if (cpu_data[cpu].need_new_asn) {			\
+		struct mm_struct * mm = current->active_mm;	\
+		cpu_data[cpu].need_new_asn = 0;			\
+		if (!mm->context[cpu])			\
+			__load_new_mm_context(mm);		\
+	}							\
+} while(0)
+#else
+#define check_mmu_context()  do { } while(0)
+#endif
+
+__EXTERN_INLINE void
+ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
+{
+	__load_new_mm_context(next_mm);
+}
+
+__EXTERN_INLINE void
+ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
+{
+	__load_new_mm_context(next_mm);
+	tbiap();
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define switch_mm(a,b,c)	alpha_mv.mv_switch_mm((a),(b),(c))
+# define activate_mm(x,y)	alpha_mv.mv_activate_mm((x),(y))
+#else
+# ifdef CONFIG_ALPHA_EV4
+#  define switch_mm(a,b,c)	ev4_switch_mm((a),(b),(c))
+#  define activate_mm(x,y)	ev4_activate_mm((x),(y))
+# else
+#  define switch_mm(a,b,c)	ev5_switch_mm((a),(b),(c))
+#  define activate_mm(x,y)	ev5_activate_mm((x),(y))
+# endif
+#endif
+
+extern inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i))
+			mm->context[i] = 0;
+	if (tsk != current)
+		tsk->thread_info->pcb.ptbr
+		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+	return 0;
+}
+
+extern inline void
+destroy_context(struct mm_struct *mm)
+{
+	/* Nothing to do.  */
+}
+
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	tsk->thread_info->pcb.ptbr
+	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+}
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
+#endif /* __ALPHA_MMU_CONTEXT_H */
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
new file mode 100644
index 0000000..726c150
--- /dev/null
+++ b/include/asm-alpha/mmzone.h
@@ -0,0 +1,131 @@
+/*
+ * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
+ * Adapted for the alpha wildfire architecture Jan 2001.
+ */
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <linux/config.h>
+#include <asm/smp.h>
+
+struct bootmem_data_t; /* stupid forward decl. */
+
+/*
+ * Following are macros that are specific to this numa platform.
+ */
+
+extern pg_data_t node_data[];
+
+#define alpha_pa_to_nid(pa)		\
+        (alpha_mv.pa_to_nid 		\
+	 ? alpha_mv.pa_to_nid(pa)	\
+	 : (0))
+#define node_mem_start(nid)		\
+        (alpha_mv.node_mem_start 	\
+	 ? alpha_mv.node_mem_start(nid) \
+	 : (0UL))
+#define node_mem_size(nid)		\
+        (alpha_mv.node_mem_size 	\
+	 ? alpha_mv.node_mem_size(nid) 	\
+	 : ((nid) ? (0UL) : (~0UL)))
+
+#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
+#define NODE_DATA(nid)		(&node_data[(nid)])
+
+#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
+
+#if 1
+#define PLAT_NODE_DATA_LOCALNR(p, n)	\
+	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
+#else
+static inline unsigned long
+PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
+{
+	unsigned long temp;
+	temp = p >> PAGE_SHIFT;
+	return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
+}
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+
+/*
+ * Following are macros that each numa implementation must define.
+ */
+
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))
+#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
+#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+
+#define local_mapnr(kvaddr) \
+      ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
+
+/*
+ * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
+ * and returns the kaddr corresponding to first physical page in the
+ * node's mem_map.
+ */
+#define LOCAL_BASE_ADDR(kaddr)						  \
+    ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn  \
+			 << PAGE_SHIFT))
+
+/* XXX: FIXME -- wli */
+#define kern_addr_valid(kaddr)	(0)
+
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
+
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
+#define pte_pfn(pte)		(pte_val(pte) >> 32)
+
+#define mk_pte(page, pgprot)						     \
+({								 	     \
+	pte_t pte;                                                           \
+	unsigned long pfn;                                                   \
+									     \
+	pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
+	pfn += page_zone(page)->zone_start_pfn << 32;			     \
+	pte_val(pte) = pfn | pgprot_val(pgprot);			     \
+									     \
+	pte;								     \
+})
+
+#define pte_page(x)							\
+({									\
+       	unsigned long kvirt;						\
+	struct page * __xx;						\
+									\
+	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
+	__xx = virt_to_page(kvirt);					\
+									\
+	__xx;                                                           \
+})
+
+#define pfn_to_page(pfn)						\
+({									\
+ 	unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT);	\
+	(node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr));	\
+})
+
+#define page_to_pfn(page)						\
+	((page) - page_zone(page)->zone_mem_map +			\
+	 (page_zone(page)->zone_start_pfn))
+
+#define page_to_pa(page)						\
+	((( (page) - page_zone(page)->zone_mem_map )			\
+	+ page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
+
+#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
+#define pfn_valid(pfn)							\
+	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <			\
+	 node_spanned_pages(pfn_to_nid(pfn)))
+
+#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#endif /* _ASM_MMZONE_H_ */
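
The mk_pte()/pte_pfn() pair above keeps the page-frame number in the upper 32 bits of the PTE and the protection bits in the lower 32.  A quick user-space round trip of that encoding (plain C on an LP64 host, with _PAGE_VALID copied from pgtable.h later in this patch):

#include <stdio.h>

#define _PAGE_VALID	0x0001UL	/* from asm-alpha/pgtable.h */

int main(void)
{
	unsigned long pfn = 0x12345UL;
	unsigned long pte = (pfn << 32) | _PAGE_VALID;	/* mk_pte */

	printf("pfn recovered: %#lx\n", pte >> 32);	/* pte_pfn */
	printf("valid bit:     %lu\n", pte & _PAGE_VALID);
	return 0;
}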
diff --git a/include/asm-alpha/module.h b/include/asm-alpha/module.h
new file mode 100644
index 0000000..7b63743
--- /dev/null
+++ b/include/asm-alpha/module.h
@@ -0,0 +1,23 @@
+#ifndef _ALPHA_MODULE_H
+#define _ALPHA_MODULE_H
+
+struct mod_arch_specific
+{
+	unsigned int gotsecindex;
+};
+
+#define Elf_Sym Elf64_Sym
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Rel Elf64_Rel
+#define Elf_Rela Elf64_Rela
+
+#define ARCH_SHF_SMALL SHF_ALPHA_GPREL
+
+#ifdef MODULE
+asm(".section .got,\"aws\",@progbits; .align 3; .previous");
+#endif
+
+#endif /*_ALPHA_MODULE_H*/
diff --git a/include/asm-alpha/msgbuf.h b/include/asm-alpha/msgbuf.h
new file mode 100644
index 0000000..9849650
--- /dev/null
+++ b/include/asm-alpha/msgbuf.h
@@ -0,0 +1,27 @@
+#ifndef _ALPHA_MSGBUF_H
+#define _ALPHA_MSGBUF_H
+
+/* 
+ * The msqid64_ds structure for alpha architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused1;
+	unsigned long  __unused2;
+};
+
+#endif /* _ALPHA_MSGBUF_H */
diff --git a/include/asm-alpha/namei.h b/include/asm-alpha/namei.h
new file mode 100644
index 0000000..5cc9bb3
--- /dev/null
+++ b/include/asm-alpha/namei.h
@@ -0,0 +1,17 @@
+/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
+ * linux/include/asm-alpha/namei.h
+ *
+ * Included from linux/fs/namei.c
+ */
+
+#ifndef __ALPHA_NAMEI_H
+#define __ALPHA_NAMEI_H
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __ALPHA_NAMEI_H */
diff --git a/include/asm-alpha/numnodes.h b/include/asm-alpha/numnodes.h
new file mode 100644
index 0000000..cd42582
--- /dev/null
+++ b/include/asm-alpha/numnodes.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_MAX_NUMNODES_H
+#define _ASM_MAX_NUMNODES_H
+
+/* Max 128 Nodes - Marvel */
+#define NODES_SHIFT	7
+
+#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
new file mode 100644
index 0000000..0577daf
--- /dev/null
+++ b/include/asm-alpha/page.h
@@ -0,0 +1,116 @@
+#ifndef _ALPHA_PAGE_H
+#define _ALPHA_PAGE_H
+
+#include <linux/config.h>
+#include <asm/pal.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	13
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#define STRICT_MM_TYPECHECKS
+
+extern void clear_page(void *page);
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+extern void copy_page(void * _to, void * _from);
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
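
get_order() returns the smallest order such that PAGE_SIZE << order covers the request.  A small user-space check of the 8 KB-page arithmetic above:

#include <stdio.h>

#define PAGE_SHIFT 13

static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 1 byte..8KB -> order 0; 8KB+1 -> order 1; 64KB -> order 3 */
	printf("%d %d %d %d\n",
	       get_order(1), get_order(8192),
	       get_order(8193), get_order(65536));
	return 0;
}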
+
+#ifdef USE_48_BIT_KSEG
+#define PAGE_OFFSET		0xffff800000000000UL
+#else
+#define PAGE_OFFSET		0xfffffc0000000000UL
+#endif
+
+#else
+
+#ifdef USE_48_BIT_KSEG
+#define PAGE_OFFSET		0xffff800000000000
+#else
+#define PAGE_OFFSET		0xfffffc0000000000
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn)	(mem_map + (pfn))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#endif /* CONFIG_DISCONTIGMEM */
+
+#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE | VM_EXEC | \
+					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ALPHA_PAGE_H */
diff --git a/include/asm-alpha/pal.h b/include/asm-alpha/pal.h
new file mode 100644
index 0000000..9b4ba0d
--- /dev/null
+++ b/include/asm-alpha/pal.h
@@ -0,0 +1,51 @@
+#ifndef __ALPHA_PAL_H
+#define __ALPHA_PAL_H
+
+/*
+ * Common PAL-code
+ */
+#define PAL_halt	  0
+#define PAL_cflush	  1
+#define PAL_draina	  2
+#define PAL_bpt		128
+#define PAL_bugchk	129
+#define PAL_chmk	131
+#define PAL_callsys	131
+#define PAL_imb		134
+#define PAL_rduniq	158
+#define PAL_wruniq	159
+#define PAL_gentrap	170
+#define PAL_nphalt	190
+
+/*
+ * VMS specific PAL-code
+ */
+#define PAL_swppal	10
+#define PAL_mfpr_vptb	41
+
+/*
+ * OSF specific PAL-code
+ */
+#define PAL_cserve	 9
+#define PAL_wripir	13
+#define PAL_rdmces	16
+#define PAL_wrmces	17
+#define PAL_wrfen	43
+#define PAL_wrvptptr	45
+#define PAL_jtopal	46
+#define PAL_swpctx	48
+#define PAL_wrval	49
+#define PAL_rdval	50
+#define PAL_tbi		51
+#define PAL_wrent	52
+#define PAL_swpipl	53
+#define PAL_rdps	54
+#define PAL_wrkgp	55
+#define PAL_wrusp	56
+#define PAL_wrperfmon	57
+#define PAL_rdusp	58
+#define PAL_whami	60
+#define PAL_retsys	61
+#define PAL_rti		63
+
+#endif /* __ALPHA_PAL_H */
diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h
new file mode 100644
index 0000000..3ed0b3b
--- /dev/null
+++ b/include/asm-alpha/param.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_ALPHA_PARAM_H
+#define _ASM_ALPHA_PARAM_H
+
+/* ??? Gross.  I don't want to parameterize this, and supposedly the
+   hardware ignores reprogramming.  We also need userland buy-in to the 
+   change in HZ, since this is visible in the wait4 resources etc.  */
+
+#include <linux/config.h>
+
+#ifndef HZ
+# ifndef CONFIG_ALPHA_RAWHIDE
+#  define HZ	1024
+# else
+#  define HZ	1200
+# endif
+#endif
+
+#define USER_HZ		HZ
+
+#define EXEC_PAGESIZE	8192
+
+#ifndef NOGROUP
+#define NOGROUP		(-1)
+#endif
+
+#define MAXHOSTNAMELEN	64	/* max length of hostname */
+
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC	HZ	/* frequency at which times() counts */
+#endif
+
+#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/include/asm-alpha/parport.h b/include/asm-alpha/parport.h
new file mode 100644
index 0000000..c5ee7cb
--- /dev/null
+++ b/include/asm-alpha/parport.h
@@ -0,0 +1,18 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_AXP_PARPORT_H
+#define _ASM_AXP_PARPORT_H 1
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+	return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASM_AXP_PARPORT_H) */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
new file mode 100644
index 0000000..0c7b57b
--- /dev/null
+++ b/include/asm-alpha/pci.h
@@ -0,0 +1,261 @@
+#ifndef __ALPHA_PCI_H
+#define __ALPHA_PCI_H
+
+#ifdef __KERNEL__
+
+#include <linux/spinlock.h>
+#include <asm/scatterlist.h>
+#include <asm/machvec.h>
+
+/*
+ * The following structure is used to manage multiple PCI busses.
+ */
+
+struct pci_dev;
+struct pci_bus;
+struct resource;
+struct pci_iommu_arena;
+struct page;
+
+/* A controller.  Used to manage multiple PCI busses.  */
+
+struct pci_controller {
+	struct pci_controller *next;
+        struct pci_bus *bus;
+	struct resource *io_space;
+	struct resource *mem_space;
+
+	/* The following are for reporting to userland.  The invariant is
+	   that if we report a BWX-capable dense memory, we do not report
+	   a sparse memory at all, even if it exists.  */
+	unsigned long sparse_mem_base;
+	unsigned long dense_mem_base;
+	unsigned long sparse_io_base;
+	unsigned long dense_io_base;
+
+	/* This one's for the kernel only.  It's in KSEG somewhere.  */
+	unsigned long config_space_base;
+
+	unsigned int index;
+	/* For compatibility with current (as of July 2003) pciutils
+	   and XFree86. Eventually will be removed. */
+	unsigned int need_domain_info;
+
+	struct pci_iommu_arena *sg_pci;
+	struct pci_iommu_arena *sg_isa;
+
+	void *sysdata;
+};
+
+/* Override the logic in pci_scan_bus for skipping already-configured
+   bus numbers.  */
+
+#define pcibios_assign_all_busses()	1
+#define pcibios_scan_all_fns(a, b)	0
+
+#define PCIBIOS_MIN_IO		alpha_mv.min_io_address
+#define PCIBIOS_MIN_MEM		alpha_mv.min_mem_address
+
+extern void pcibios_set_master(struct pci_dev *dev);
+
+extern inline void pcibios_penalize_isa_irq(int irq)
+{
+	/* We don't do dynamic PCI IRQ allocation */
+}
+
+/* IOMMU controls.  */
+
+/* The PCI address space does not equal the physical memory address space.
+   The networking and block device layers use this boolean for bounce buffer
+   decisions.  */
+#define PCI_DMA_BUS_IS_PHYS  0
+
+/* Allocate and map kernel buffer using consistent mode DMA for PCI
+   device.  Returns non-NULL cpu-view pointer to the buffer if
+   successful and sets *DMA_ADDRP to the pci side dma address as well,
+   else DMA_ADDRP is undefined.  */
+
+extern void *pci_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *);
+
+/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
+   be values that were returned from pci_alloc_consistent.  SIZE must
+   be the same as what was passed into pci_alloc_consistent.
+   References to the memory and mappings associated with CPU_ADDR or
+   DMA_ADDR past this call are illegal.  */
+
+extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming mode.
+   The 32-bit PCI bus mastering address to use is returned.  Once the device
+   is given the dma address, the device owns this memory until either
+   pci_unmap_single or pci_dma_sync_single_for_cpu is performed.  */
+
+extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int);
+
+/* Likewise, but for a page instead of an address.  */
+extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
+			       unsigned long, size_t, int);
+
+/* Test for pci_map_single or pci_map_page having generated an error.  */
+
+static inline int
+pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_addr == 0;
+}
+
+/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
+   SIZE must match what was provided for in a previous pci_map_single
+   call.  All other usages are undefined.  After this call, reads by
+   the cpu to the buffer are guaranteed to see whatever the device
+   wrote there.  */
+
+extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
+extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int);
+
+/* pci_unmap_{single,page} is not a nop, thus... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
+	dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
+	__u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)			\
+	((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
+	(((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)			\
+	((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
+	(((PTR)->LEN_NAME) = (VAL))
+
+/* Map a set of buffers described by scatterlist in streaming mode for
+   PCI DMA.  This is the scatter-gather version of the above
+   pci_map_single interface.  Here the scatter gather list elements
+   are each tagged with the appropriate PCI dma address and length.
+   They are obtained via sg_dma_{address,length}(SG).
+
+   NOTE: An implementation may be able to use a smaller number of DMA
+   address/length pairs than there are SG table elements (for
+   example via virtual mapping capabilities).  The routine returns the
+   number of addr/length pairs actually used, at most nents.
+
+   Device ownership issues as mentioned above for pci_map_single are
+   the same here.  */
+
+extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int);
+
+/* Unmap a set of streaming mode DMA translations.  Again, cpu read
+   rules concerning calls here are the same as for pci_unmap_single()
+   above.  */
+
+extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
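
Taken together, the declarations above define an ownership protocol: map, hand the bus address to the device, unmap when the CPU wants the data back.  A hedged driver-side sketch of that protocol — demo_dma_once() is hypothetical, pdev/buf/len are assumed to come from the rest of a driver, and error handling is abbreviated:

#include <linux/pci.h>
#include <linux/errno.h>

/* hypothetical receive-buffer setup; not a real driver function */
static int demo_dma_once(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* receive path: the device will write into buf */
	handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(handle))
		return -ENOMEM;

	/* ... program `handle' into the device, wait for completion ... */

	/* after unmap, CPU reads are guaranteed to see the device's data */
	pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
	return 0;
}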
+
+/* Make physical memory consistent for a single streaming mode DMA
+   translation after a transfer, while the device still has ownership
+   of the buffer.
+
+   If you perform a pci_map_single() but wish to interrogate the
+   buffer using the cpu, yet do not wish to tear down the PCI dma
+   mapping, you must call this function before doing so.  At the next
+   point you give the PCI dma address back to the card, you must first
+   perform a pci_dma_sync_single_for_device, and then the device again
+   owns the buffer.  */
+
+static inline void
+pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr,
+			    long size, int direction)
+{
+	/* Nothing to do.  */
+}
+
+static inline void
+pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr,
+			       size_t size, int direction)
+{
+	/* Nothing to do.  */
+}
+
+/* Make physical memory consistent for a set of streaming mode DMA
+   translations after a transfer.  The same as pci_dma_sync_single_*
+   but for a scatter-gather list, same rules and usage.  */
+
+static inline void
+pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg,
+			int nents, int direction)
+{
+	/* Nothing to do.  */
+}
+
+static inline void
+pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg,
+			   int nents, int direction)
+{
+	/* Nothing to do.  */
+}
+
+/* Return whether the given PCI device DMA address mask can
+   be supported properly.  For example, if your device can
+   only drive the low 24-bits during PCI bus mastering, then
+   you would pass 0x00ffffff as the mask to this function.  */
+
+extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
+
+/* True if the machine supports DAC addressing, and DEV can
+   make use of it given MASK.  */
+extern int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
+
+/* Convert to/from DAC dma address and struct page.  */
+extern dma64_addr_t pci_dac_page_to_dma(struct pci_dev *, struct page *,
+					unsigned long, int);
+extern struct page *pci_dac_dma_to_page(struct pci_dev *, dma64_addr_t);
+extern unsigned long pci_dac_dma_to_offset(struct pci_dev *, dma64_addr_t);
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr,
+				size_t len, int direction)
+{
+	/* Nothing to do. */
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr,
+				   size_t len, int direction)
+{
+	/* Nothing to do. */
+}
+
+/* TODO: integrate with include/asm-generic/pci.h ? */
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	return channel ? 15 : 14;
+}
+
+extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *,
+				    struct resource *);
+
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	struct pci_controller *hose = bus->sysdata;
+	return hose->need_domain_info;
+}
+
+static inline void
+pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+struct pci_dev *alpha_gendev_to_pci(struct device *dev);
+
+#endif /* __KERNEL__ */
+
+/* Values for the `which' argument to sys_pciconfig_iobase.  */
+#define IOBASE_HOSE		0
+#define IOBASE_SPARSE_MEM	1
+#define IOBASE_DENSE_MEM	2
+#define IOBASE_SPARSE_IO	3
+#define IOBASE_DENSE_IO		4
+#define IOBASE_ROOT_BUS		5
+#define IOBASE_FROM_HOSE	0x10000
+
+#endif /* __ALPHA_PCI_H */
diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h
new file mode 100644
index 0000000..48348fe
--- /dev/null
+++ b/include/asm-alpha/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_PERCPU_H
+#define __ALPHA_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ALPHA_PERCPU_H */
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
new file mode 100644
index 0000000..3084756
--- /dev/null
+++ b/include/asm-alpha/pgalloc.h
@@ -0,0 +1,78 @@
+#ifndef _ALPHA_PGALLOC_H
+#define _ALPHA_PGALLOC_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+
+/*      
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+{
+	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
+}
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+	pmd_set(pmd, pte);
+}
+
+static inline void
+pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_set(pgd, pmd);
+}
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+static inline void
+pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+static inline pmd_t *
+pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	return ret;
+}
+
+static inline void
+pmd_free(pmd_t *pmd)
+{
+	free_page((unsigned long)pmd);
+}
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+
+static inline void
+pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline struct page *
+pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *pte = pte_alloc_one_kernel(mm, addr);
+	if (pte)
+		return virt_to_page(pte);
+	return NULL;
+}
+
+static inline void
+pte_free(struct page *page)
+{
+	__free_page(page);
+}
+
+#define check_pgt_cache()	do { } while (0)
+
+#endif /* _ALPHA_PGALLOC_H */
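
A hedged sketch of how the generic mm code strings the helpers above together when it has to grow a process page table; demo_grow_page_table() is illustrative only, mm/pgd/addr are assumed to come from the fault path, and locking plus error unwinding are omitted:

/* illustration only: the real callers live in mm/memory.c */
static void demo_grow_page_table(struct mm_struct *mm, pgd_t *pgd,
				 unsigned long addr)
{
	pmd_t *pmd = pmd_alloc_one(mm, addr);	/* one zeroed page of pmds */
	struct page *pte;

	if (!pmd)
		return;
	pgd_populate(mm, pgd, pmd);		/* point the pgd slot at it */

	pte = pte_alloc_one(mm, addr);		/* one page of ptes */
	if (pte)
		pmd_populate(mm, pmd, pte);	/* point the pmd slot at it */
}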
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
new file mode 100644
index 0000000..faae196
--- /dev/null
+++ b/include/asm-alpha/pgtable.h
@@ -0,0 +1,369 @@
+#ifndef _ALPHA_PGTABLE_H
+#define _ALPHA_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the Alpha page table tree.
+ *
+ * This hopefully works with any standard Alpha page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ */
+#include <linux/config.h>
+#include <linux/mmzone.h>
+
+#include <asm/page.h>
+#include <asm/processor.h>	/* For TASK_SIZE */
+#include <asm/machvec.h>
+
+/* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level:  the Alpha is three-level, with
+ * all levels having a one-page page table.
+ */
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_PGD_NR	0
+
+/* Number of pointers that fit on a page:  this will go away. */
+#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
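
With the 8 KB pages from asm/page.h (PAGE_SHIFT = 13), the geometry above works out as follows:

	PMD_SHIFT   = 13 + 10 = 23   ->  one pmd entry maps 8 MB
	PGDIR_SHIFT = 13 + 20 = 33   ->  one pgd entry maps 8 GB
	PTRS_PER_*  = 1 << 10 = 1024 entries per one-page table

so three levels span 1024 * 8 GB = 8 TB of virtual space, and the 4 TB TASK_SIZE from asm/processor.h occupies 512 of the 1024 pgd slots (USER_PTRS_PER_PGD = 4 TB / 8 GB = 512).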
+
+#ifdef CONFIG_ALPHA_LARGE_VMALLOC
+#define VMALLOC_START		0xfffffe0000000000
+#else
+#define VMALLOC_START		(-2*PGDIR_SIZE)
+#endif
+#define VMALLOC_END		(-PGDIR_SIZE)
+
+/*
+ * OSF/1 PAL-code-imposed page table bits
+ */
+#define _PAGE_VALID	0x0001
+#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
+#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
+#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
+#define _PAGE_ASM	0x0010
+#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
+#define _PAGE_URE	0x0200	/* xxx */
+#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
+#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */
+
+/* .. and these are ours ... */
+#define _PAGE_DIRTY	0x20000
+#define _PAGE_ACCESSED	0x40000
+#define _PAGE_FILE	0x80000	/* set:pagecache, unset:swap */
+
+/*
+ * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
+ * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
+ * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
+ * the KRE/URE bits to watch for it. That way we don't need to overload the
+ * KWE/UWE bits with handling both dirty and accessed.
+ *
+ * Note that the kernel uses the accessed bit just to check whether to page
+ * out a page or not, so it doesn't have to be exact anyway.
+ */
+
+#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
+#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
+
+#define _PFN_MASK	0xFFFFFFFF00000000UL
+
+#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
+#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time they are used,
+ * the page is accessed. They are cleared only by the page-out routines
+ */
+#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
+#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
+#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+
+#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
+#define _PAGE_S(x) _PAGE_NORMAL(x)
+
+/*
+ * The hardware can handle write-only mappings, but as the Alpha
+ * architecture does byte-wide writes with a read-modify-write
+ * sequence, it's not practical to have write-without-read privs.
+ * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
+ * arch/alpha/mm/fault.c)
+ */
+	/* xwr */
+#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
+#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
+#define __P010	_PAGE_P(_PAGE_FOE)
+#define __P011	_PAGE_P(_PAGE_FOE)
+#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
+#define __P101	_PAGE_P(_PAGE_FOW)
+#define __P110	_PAGE_P(0)
+#define __P111	_PAGE_P(0)
+
+#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
+#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
+#define __S010	_PAGE_S(_PAGE_FOE)
+#define __S011	_PAGE_S(_PAGE_FOE)
+#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
+#define __S101	_PAGE_S(_PAGE_FOW)
+#define __S110	_PAGE_S(0)
+#define __S111	_PAGE_S(0)
+
+/*
+ * BAD_PAGETABLE is used when we need a bogus page-table, while
+ * BAD_PAGE is used for a bogus page.
+ *
+ * ZERO_PAGE is a global shared page that is always zero:  used
+ * for zero-mapped memory areas etc..
+ */
+extern pte_t __bad_page(void);
+extern pmd_t * __bad_pagetable(void);
+
+extern unsigned long __zero_page(void);
+
+#define BAD_PAGETABLE	__bad_pagetable()
+#define BAD_PAGE	__bad_page()
+#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR			(8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK			(~(sizeof(void*)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+#define SIZEOF_PTR_LOG2			3
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address)		\
+  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/*
+ * On certain platforms whose physical address space can overlap KSEG,
+ * namely EV6 and above, we must re-twiddle the physaddr to restore the
+ * correct high-order bits.
+ *
+ * This is extremely confusing until you realize that this is actually
+ * just working around a userspace bug.  The X server was intending to
+ * provide the physical address but instead provided the KSEG address.
+ * Or tried to, except it's not representable.
+ * 
+ * On Tsunami there's nothing meaningful at 0x40000000000, so this is
+ * a safe thing to do.  Come the first core logic that does put something
+ * in this area -- memory or whathaveyou -- then this hack will have
+ * to go away.  So be prepared!
+ */
+
+#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
+#error "EV6-only feature in a generic kernel"
+#endif
+#if defined(CONFIG_ALPHA_GENERIC) || \
+    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
+#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
+#define PHYS_TWIDDLE(pfn) \
+  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
+  ? ((pfn) ^= KSEG_PFN) : (pfn))
+#else
+#define PHYS_TWIDDLE(pfn) (pfn)
+#endif
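
A user-space check of PHYS_TWIDDLE's behaviour with PAGE_SHIFT = 13 on an LP64 host: a pfn carrying the bogus 0x400xxxxxxxx KSEG pattern gets its top bits flipped, while an ordinary physical pfn passes through untouched (the macro's ^= requires an lvalue, hence the variables):

#include <stdio.h>

#define PAGE_SHIFT	13
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))

int main(void)
{
	unsigned long bogus = 0x40000000000UL >> PAGE_SHIFT;	/* KSEG alias */
	unsigned long sane  = 0x00000001000UL >> PAGE_SHIFT;	/* real phys */

	printf("aliased pfn  -> %#lx\n", PHYS_TWIDDLE(bogus));
	printf("ordinary pfn -> %#lx\n", PHYS_TWIDDLE(sane));
	return 0;
}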
+
+/*
+ * Conversion functions:  convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#ifndef CONFIG_DISCONTIGMEM
+#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)
+
+#define pte_pfn(pte)	(pte_val(pte) >> 32)
+#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
+#define mk_pte(page, pgprot)						\
+({									\
+	pte_t pte;							\
+									\
+	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
+	pte;								\
+})
+#endif
+
+extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
+{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+
+extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
+{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
+
+extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
+
+
+extern inline unsigned long
+pmd_page_kernel(pmd_t pmd)
+{
+	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
+#endif
+
+extern inline unsigned long pgd_page(pgd_t pgd)
+{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+
+extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
+extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
+extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_val(*ptep) = 0;
+}
+
+extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
+extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
+extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
+extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }
+
+extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
+extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
+extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
+extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+extern inline int pte_read(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOR); }
+extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
+extern inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOE); }
+extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
+extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+
+extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
+extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOR; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOE; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
+extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
+extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOR; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOE; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
+
+#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))
+
+/* Find an entry in the second-level page table.. */
+extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+}
+
+/* Find an entry in the third-level page table.. */
+extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
+{
+	return (pte_t *) pmd_page_kernel(*dir)
+		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
+}
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
+#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
+#define pte_unmap(pte)			do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+extern pgd_t swapper_pg_dir[1024];
+
+/*
+ * The Alpha doesn't have any external MMU info:  the kernel page
+ * tables contain all the necessary information.
+ */
+extern inline void update_mmu_cache(struct vm_area_struct * vma,
+	unsigned long address, pte_t pte)
+{
+}
+
+/*
+ * Non-present pages:  high 24 bits are offset, next 8 bits type,
+ * low 32 bits zero.
+ */
+extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
+
+#define __swp_type(x)		(((x).val >> 32) & 0xff)
+#define __swp_offset(x)		((x).val >> 40)
+#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+
+#define pte_to_pgoff(pte)	(pte_val(pte) >> 32)
+#define pgoff_to_pte(off)	((pte_t) { ((off) << 32) | _PAGE_FILE })
+
+#define PTE_FILE_MAX_BITS	32
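
A round trip of the non-present layout just described — offset in the top 24 bits, type in the next 8, and the low 32 bits zero so _PAGE_VALID stays clear (user-space, LP64):

#include <stdio.h>

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << 32) | (offset << 40);	/* mk_swap_pte */

	printf("type=%#lx offset=%#lx valid-bit=%lu\n",
	       (val >> 32) & 0xff,	/* __swp_type   */
	       val >> 40,		/* __swp_offset */
	       val & 1);		/* _PAGE_VALID  */
	return 0;
}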
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr)	(1)
+#endif
+
+#define io_remap_page_range(vma, start, busaddr, size, prot)	\
+({								\
+	void *va = (void __force *)ioremap(busaddr, size);	\
+	unsigned long pfn = virt_to_phys(va) >> PAGE_SHIFT;	\
+	remap_pfn_range(vma, start, pfn, size, prot);		\
+})
+
+#define io_remap_pfn_range(vma, start, pfn, size, prot)	\
+		remap_pfn_range(vma, start, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern void paging_init(void);
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()	do { } while (0)
+
+/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* _ALPHA_PGTABLE_H */
diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h
new file mode 100644
index 0000000..34f333b
--- /dev/null
+++ b/include/asm-alpha/poll.h
@@ -0,0 +1,23 @@
+#ifndef __ALPHA_POLL_H
+#define __ALPHA_POLL_H
+
+#define POLLIN		(1 << 0)
+#define POLLPRI		(1 << 1)
+#define POLLOUT		(1 << 2)
+#define POLLERR		(1 << 3)
+#define POLLHUP		(1 << 4)
+#define POLLNVAL	(1 << 5)
+#define POLLRDNORM	(1 << 6)
+#define POLLRDBAND	(1 << 7)
+#define POLLWRNORM	(1 << 8)
+#define POLLWRBAND	(1 << 9)
+#define POLLMSG		(1 << 10)
+#define POLLREMOVE	(1 << 11)
+
+struct pollfd {
+	int fd;
+	short events;
+	short revents;
+};
+
+#endif
diff --git a/include/asm-alpha/posix_types.h b/include/asm-alpha/posix_types.h
new file mode 100644
index 0000000..c78c04a
--- /dev/null
+++ b/include/asm-alpha/posix_types.h
@@ -0,0 +1,123 @@
+#ifndef _ALPHA_POSIX_TYPES_H
+#define _ALPHA_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned int	__kernel_ino_t;
+typedef unsigned int	__kernel_mode_t;
+typedef unsigned int	__kernel_nlink_t;
+typedef long		__kernel_off_t;
+typedef long long	__kernel_loff_t;
+typedef int		__kernel_pid_t;
+typedef int		__kernel_ipc_pid_t;
+typedef unsigned int	__kernel_uid_t;
+typedef unsigned int	__kernel_gid_t;
+typedef unsigned long	__kernel_size_t;
+typedef long		__kernel_ssize_t;
+typedef long		__kernel_ptrdiff_t;
+typedef long		__kernel_time_t;
+typedef long		__kernel_suseconds_t;
+typedef long		__kernel_clock_t;
+typedef int		__kernel_daddr_t;
+typedef char *		__kernel_caddr_t;
+typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */
+typedef unsigned short	__kernel_uid16_t;
+typedef unsigned short	__kernel_gid16_t;
+typedef int		__kernel_clockid_t;
+typedef int		__kernel_timer_t;
+
+typedef struct {
+	int	val[2];
+} __kernel_fsid_t;
+
+typedef __kernel_uid_t __kernel_old_uid_t;
+typedef __kernel_gid_t __kernel_old_gid_t;
+typedef __kernel_uid_t __kernel_uid32_t;
+typedef __kernel_gid_t __kernel_gid32_t;
+
+typedef unsigned int	__kernel_old_dev_t;
+
+#ifdef __KERNEL__
+
+#ifndef __GNUC__
+
+#define	__FD_SET(d, set)	((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define	__FD_CLR(d, set)	((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define	__FD_ISSET(d, set)	(((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
+#define	__FD_ZERO(set)	\
+  ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
+
+#else /* __GNUC__ */
+
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+	unsigned long _tmp = fd / __NFDBITS;
+	unsigned long _rem = fd % __NFDBITS;
+	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+	unsigned long _tmp = fd / __NFDBITS;
+	unsigned long _rem = fd % __NFDBITS;
+	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
+{ 
+	unsigned long _tmp = fd / __NFDBITS;
+	unsigned long _rem = fd % __NFDBITS;
+	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+	unsigned long *tmp = p->fds_bits;
+	int i;
+
+	if (__builtin_constant_p(__FDSET_LONGS)) {
+		switch (__FDSET_LONGS) {
+		      case 16:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+			return;
+
+		      case 8:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+			return;
+
+		      case 4:
+			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+			return;
+		}
+	}
+	i = __FDSET_LONGS;
+	while (i) {
+		i--;
+		*tmp = 0;
+		tmp++;
+	}
+}
+
+#endif /* __GNUC__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ALPHA_POSIX_TYPES_H */
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
new file mode 100644
index 0000000..059780a
--- /dev/null
+++ b/include/asm-alpha/processor.h
@@ -0,0 +1,118 @@
+/*
+ * include/asm-alpha/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_ALPHA_PROCESSOR_H
+#define __ASM_ALPHA_PROCESSOR_H
+
+#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */
+
+/*
+ * Returns current instruction pointer ("program counter").
+ */
+#define current_text_addr() \
+  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
+
+/*
+ * We have a 42-bit user address space: 4TB user VM...
+ */
+#define TASK_SIZE (0x40000000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE \
+  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+/* This is dead.  Everything has been moved to thread_info.  */
+struct thread_struct { };
+#define INIT_THREAD  { }
+
+/* Return saved PC of a blocked thread.  */
+struct task_struct;
+extern unsigned long thread_saved_pc(struct task_struct *);
+
+/* Do necessary setup to start up a newly executed thread.  */
+extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)	do { } while (0)
+
+/* Create a kernel thread without removing it from tasklists.  */
+extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+unsigned long get_wchan(struct task_struct *p);
+
+/* See arch/alpha/kernel/ptrace.c for details.  */
+#define PT_REG(reg) \
+  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
+
+#define SW_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+  + offsetof(struct switch_stack, reg))
+
+#define KSTK_EIP(tsk) \
+  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
+
+#define KSTK_ESP(tsk) \
+  ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
+
+#define cpu_relax()	barrier()
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+#ifndef CONFIG_SMP
+/* Nothing to prefetch. */
+#define spin_lock_prefetch(lock)  	do { } while (0)
+#endif
+
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+extern inline void prefetch(const void *ptr)  
+{ 
+	__builtin_prefetch(ptr, 0, 3);
+}
+
+extern inline void prefetchw(const void *ptr)  
+{
+	__builtin_prefetch(ptr, 1, 3);
+}
+
+#ifdef CONFIG_SMP
+extern inline void spin_lock_prefetch(const void *ptr)  
+{
+	__builtin_prefetch(ptr, 1, 3);
+}
+#endif
+
+#else
+extern inline void prefetch(const void *ptr)  
+{ 
+	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr)); 
+}
+
+extern inline void prefetchw(const void *ptr)  
+{
+	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr)); 
+}
+
+#ifdef CONFIG_SMP
+extern inline void spin_lock_prefetch(const void *ptr)  
+{
+	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr)); 
+}
+#endif
+
+#endif /* GCC 3.1 */
+
+#endif /* __ASM_ALPHA_PROCESSOR_H */
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
new file mode 100644
index 0000000..d462c5e
--- /dev/null
+++ b/include/asm-alpha/ptrace.h
@@ -0,0 +1,82 @@
+#ifndef _ASMAXP_PTRACE_H
+#define _ASMAXP_PTRACE_H
+
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry
+ *
+ * NOTE! I want to minimize the overhead of system calls, so this
+ * struct has as little information as possible.  It does not have
+ *
+ *  - floating point regs: the kernel doesn't change those
+ *  - r9-15: saved by the C compiler
+ *
+ * This makes "fork()" and "exec()" a bit more complex, but should
+ * give us low system call latency.
+ */
+
+struct pt_regs {
+	unsigned long r0;
+	unsigned long r1;
+	unsigned long r2;
+	unsigned long r3;
+	unsigned long r4;
+	unsigned long r5;
+	unsigned long r6;
+	unsigned long r7;
+	unsigned long r8;
+	unsigned long r19;
+	unsigned long r20;
+	unsigned long r21;
+	unsigned long r22;
+	unsigned long r23;
+	unsigned long r24;
+	unsigned long r25;
+	unsigned long r26;
+	unsigned long r27;
+	unsigned long r28;
+	unsigned long hae;
+/* JRP - These are the values provided to a0-a2 by PALcode */
+	unsigned long trap_a0;
+	unsigned long trap_a1;
+	unsigned long trap_a2;
+/* These are saved by PAL-code: */
+	unsigned long ps;
+	unsigned long pc;
+	unsigned long gp;
+	unsigned long r16;
+	unsigned long r17;
+	unsigned long r18;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long r26;
+	unsigned long fp[32];	/* fp[31] is fpcr */
+};
+
+#ifdef __KERNEL__
+#define user_mode(regs) (((regs)->ps & 8) != 0)
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+extern void show_regs(struct pt_regs *);
+
+#define alpha_task_regs(task) \
+  ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+
+#endif
+
+#endif
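
alpha_task_regs() relies on struct pt_regs sitting at the very top of the task's two-page kernel stack.  A hedged restatement of that address arithmetic — read_saved_pc() is illustrative only, not a kernel interface:

/* illustrative only: fetch a blocked task's saved pc the same way
   alpha_task_regs()/PT_REG() address it */
static inline unsigned long read_saved_pc(struct task_struct *task)
{
	struct pt_regs *regs = (struct pt_regs *)
		((long)task->thread_info + 2*PAGE_SIZE) - 1;
	return regs->pc;
}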
diff --git a/include/asm-alpha/reg.h b/include/asm-alpha/reg.h
new file mode 100644
index 0000000..86ff916
--- /dev/null
+++ b/include/asm-alpha/reg.h
@@ -0,0 +1,52 @@
+#ifndef __reg_h__
+#define __reg_h__
+
+/*
+ * Exception frame offsets.
+ */
+#define EF_V0		0
+#define EF_T0		1
+#define EF_T1		2
+#define EF_T2		3
+#define EF_T3		4
+#define EF_T4		5
+#define EF_T5		6
+#define EF_T6		7
+#define EF_T7		8
+#define EF_S0		9
+#define EF_S1		10
+#define EF_S2		11
+#define EF_S3		12
+#define EF_S4		13
+#define EF_S5		14
+#define EF_S6		15
+#define EF_A3		16
+#define EF_A4		17
+#define EF_A5		18
+#define EF_T8		19
+#define EF_T9		20
+#define EF_T10		21
+#define EF_T11		22
+#define EF_RA		23
+#define EF_T12		24
+#define EF_AT		25
+#define EF_SP		26
+#define EF_PS		27
+#define EF_PC		28
+#define EF_GP		29
+#define EF_A0		30
+#define EF_A1		31
+#define EF_A2		32
+
+#define EF_SIZE		(33*8)
+#define HWEF_SIZE	(6*8)		/* size of PAL frame (PS-A2) */
+
+#define EF_SSIZE	(EF_SIZE - HWEF_SIZE)
+
+/*
+ * Map register number into core file offset.
+ */
+#define CORE_REG(reg, ubase) \
+	(((unsigned long *)((unsigned long)(ubase)))[reg])
+
+#endif /* __reg_h__ */
diff --git a/include/asm-alpha/regdef.h b/include/asm-alpha/regdef.h
new file mode 100644
index 0000000..142df9c
--- /dev/null
+++ b/include/asm-alpha/regdef.h
@@ -0,0 +1,44 @@
+#ifndef __alpha_regdef_h__
+#define __alpha_regdef_h__
+
+#define v0	$0	/* function return value */
+
+#define t0	$1	/* temporary registers (caller-saved) */
+#define t1	$2
+#define t2	$3
+#define t3	$4
+#define t4	$5
+#define t5	$6
+#define t6	$7
+#define t7	$8
+
+#define	s0	$9	/* saved-registers (callee-saved registers) */
+#define	s1	$10
+#define	s2	$11
+#define	s3	$12
+#define	s4	$13
+#define	s5	$14
+#define	s6	$15
+#define	fp	s6	/* frame-pointer (s6 in frame-less procedures) */
+
+#define a0	$16	/* argument registers (caller-saved) */
+#define a1	$17
+#define a2	$18
+#define a3	$19
+#define a4	$20
+#define a5	$21
+
+#define t8	$22	/* more temps (caller-saved) */
+#define t9	$23
+#define t10	$24
+#define t11	$25
+#define ra	$26	/* return address register */
+#define t12	$27
+
+#define pv	t12	/* procedure-variable register */
+#define AT	$at	/* assembler temporary */
+#define gp	$29	/* global pointer */
+#define sp	$30	/* stack pointer */
+#define zero	$31	/* reads as zero, writes are noops */
+
+#endif /* __alpha_regdef_h__ */
diff --git a/include/asm-alpha/resource.h b/include/asm-alpha/resource.h
new file mode 100644
index 0000000..c10874f
--- /dev/null
+++ b/include/asm-alpha/resource.h
@@ -0,0 +1,22 @@
+#ifndef _ALPHA_RESOURCE_H
+#define _ALPHA_RESOURCE_H
+
+/*
+ * Alpha/Linux-specific ordering of these four resource limit IDs,
+ * the rest comes from the generic header:
+ */
+#define RLIMIT_NOFILE		6	/* max number of open files */
+#define RLIMIT_AS		7	/* address space limit */
+#define RLIMIT_NPROC		8	/* max number of processes */
+#define RLIMIT_MEMLOCK		9	/* max locked-in-memory address space */
+
+/*
+ * SuS says limits have to be unsigned.  Fine, it's unsigned, but
+ * we retain the old value for compatibility, especially with DU. 
+ * When you run into the 2^63 barrier, you call me.
+ */
+#define RLIM_INFINITY		0x7ffffffffffffffful
+
+#include <asm-generic/resource.h>
+
+#endif /* _ALPHA_RESOURCE_H */
diff --git a/include/asm-alpha/rtc.h b/include/asm-alpha/rtc.h
new file mode 100644
index 0000000..4e854b1
--- /dev/null
+++ b/include/asm-alpha/rtc.h
@@ -0,0 +1,10 @@
+#ifndef _ALPHA_RTC_H
+#define _ALPHA_RTC_H
+
+/*
+ * Alpha uses the default access methods for the RTC.
+ */
+
+#include <asm-generic/rtc.h>
+
+#endif
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
new file mode 100644
index 0000000..8e058a6
--- /dev/null
+++ b/include/asm-alpha/rwsem.h
@@ -0,0 +1,266 @@
+#ifndef _ALPHA_RWSEM_H
+#define _ALPHA_RWSEM_H
+
+/*
+ * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
+ * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
+ */
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	long			count;
+#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
+#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
+#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#if RWSEM_DEBUG
+	int			debug;
+#endif
+};
+
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT      , 0
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l	%0,%1\n"
+	"	addq	%0,%3,%2\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		rwsem_down_read_failed(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	long old, new, res;
+
+	res = sem->count;
+	do {
+		new = res + RWSEM_ACTIVE_READ_BIAS;
+		if (new <= 0)
+			break;
+		old = res;
+		res = cmpxchg(&sem->count, old, new);
+	} while (res != old);
+	return res >= 0 ? 1 : 0;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l	%0,%1\n"
+	"	addq	%0,%3,%2\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount))
+		rwsem_down_write_failed(sem);
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			   RWSEM_ACTIVE_WRITE_BIAS);
+	if (ret == RWSEM_UNLOCKED_VALUE)
+		return 1;
+	return 0;
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	mb\n"
+	"1:	ldq_l	%0,%1\n"
+	"	subq	%0,%3,%2\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
+			rwsem_wake(sem);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	long count;
+#ifndef	CONFIG_SMP
+	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	mb\n"
+	"1:	ldq_l	%0,%1\n"
+	"	subq	%0,%3,%2\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	"	subq	%0,%3,%0\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(count))
+		if ((int)count == 0)
+			rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_WAITING_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l	%0,%1\n"
+	"	addq	%0,%3,%2\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		rwsem_downgrade_wake(sem);
+}
+
+static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	ldq_l	%0,%1\n"
+	"	addq	%0,%2,%0\n"
+	"	stq_c	%0,%1\n"
+	"	beq	%0,2f\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (sem->count)
+	:"Ir" (val), "m" (sem->count));
+#endif
+}
+
+static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+	return sem->count;
+#else
+	long ret, temp;
+	__asm__ __volatile__(
+	"1:	ldq_l	%0,%1\n"
+	"	addq 	%0,%3,%2\n"
+	"	addq	%0,%3,%0\n"
+	"	stq_c	%2,%1\n"
+	"	beq	%2,2f\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (val), "m" (sem->count));
+
+	return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_RWSEM_H */
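
The whole scheme hangs on how the 64-bit count is packed: active lockers live in the low 32 bits, and each sleeper adds the negative RWSEM_WAITING_BIAS, so a negative count means contention. A user-space sketch of just that arithmetic (no locking; constants copied from above):

#include <stdio.h>

#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = 0;				/* RWSEM_UNLOCKED_VALUE */

	count += RWSEM_ACTIVE_BIAS;		/* first reader */
	count += RWSEM_ACTIVE_BIAS;		/* second reader */
	printf("two readers: %#lx (>= 0, no waiters)\n", (unsigned long)count);

	count = RWSEM_ACTIVE_WRITE_BIAS;	/* one writer */
	printf("one writer:  %#lx (< 0, __down_read would block)\n",
	       (unsigned long)count);
	return 0;
}
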
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
new file mode 100644
index 0000000..6afb8bd
--- /dev/null
+++ b/include/asm-alpha/scatterlist.h
@@ -0,0 +1,21 @@
+#ifndef _ALPHA_SCATTERLIST_H
+#define _ALPHA_SCATTERLIST_H
+
+#include <asm/page.h>
+  
+struct scatterlist {
+	struct page *page;
+	unsigned int offset;
+
+	unsigned int length;
+
+	dma_addr_t dma_address;
+	__u32 dma_length;
+};
+
+#define sg_dma_address(sg)	((sg)->dma_address)
+#define sg_dma_len(sg)		((sg)->dma_length)
+
+#define ISA_DMA_THRESHOLD (~0UL)
+
+#endif /* !(_ALPHA_SCATTERLIST_H) */
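
A driver never fills dma_address/dma_length itself; it hands the array to dma_map_sg() and then walks the mapped entries with the two accessors. A hedged sketch of that consumer pattern (kernel context with <linux/dma-mapping.h> assumed; hw_program_segment is a hypothetical stand-in for real device code, and unmapping is elided):

static void program_sg(struct device *dev, struct scatterlist *sg, int nents)
{
	int i, mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	for (i = 0; i < mapped; i++)	/* mapped may be fewer than nents */
		hw_program_segment(sg_dma_address(&sg[i]),
				   sg_dma_len(&sg[i]));
}
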
diff --git a/include/asm-alpha/sections.h b/include/asm-alpha/sections.h
new file mode 100644
index 0000000..43b40ed
--- /dev/null
+++ b/include/asm-alpha/sections.h
@@ -0,0 +1,7 @@
+#ifndef _ALPHA_SECTIONS_H
+#define _ALPHA_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#endif
diff --git a/include/asm-alpha/segment.h b/include/asm-alpha/segment.h
new file mode 100644
index 0000000..0453d97
--- /dev/null
+++ b/include/asm-alpha/segment.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_SEGMENT_H
+#define __ALPHA_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
new file mode 100644
index 0000000..eb2cbd9
--- /dev/null
+++ b/include/asm-alpha/semaphore.h
@@ -0,0 +1,153 @@
+#ifndef _ALPHA_SEMAPHORE_H
+#define _ALPHA_SEMAPHORE_H
+
+/*
+ * SMP- and interrupt-safe semaphores..
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1996, 2000 Richard Henderson
+ */
+
+#include <asm/current.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/compiler.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+	atomic_t count;
+	wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n)			\
+{								\
+	.count	= ATOMIC_INIT(n),				\
+  	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
+}
+
+#define __MUTEX_INITIALIZER(name)			\
+	__SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+	/*
+	 * Logically, 
+	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+	 * except that gcc so far generates better code initializing the parts separately.
+	 */
+
+	atomic_set(&sem->count, val);
+	init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+	sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+	sema_init(sem, 0);
+}
+
+extern void down(struct semaphore *);
+extern void __down_failed(struct semaphore *);
+extern int  down_interruptible(struct semaphore *);
+extern int  __down_failed_interruptible(struct semaphore *);
+extern int  down_trylock(struct semaphore *);
+extern void up(struct semaphore *);
+extern void __up_wakeup(struct semaphore *);
+
+/*
+ * Hidden out of line code is fun, but extremely messy.  Rely on newer
+ * compilers to do a respectable job with this.  The contention cases
+ * are handled out of line in arch/alpha/kernel/semaphore.c.
+ */
+
+static inline void __down(struct semaphore *sem)
+{
+	long count;
+	might_sleep();
+	count = atomic_dec_return(&sem->count);
+	if (unlikely(count < 0))
+		__down_failed(sem);
+}
+
+static inline int __down_interruptible(struct semaphore *sem)
+{
+	long count;
+	might_sleep();
+	count = atomic_dec_return(&sem->count);
+	if (unlikely(count < 0))
+		return __down_failed_interruptible(sem);
+	return 0;
+}
+
+/*
+ * down_trylock returns 0 on success, 1 if we failed to get the lock.
+ */
+
+static inline int __down_trylock(struct semaphore *sem)
+{
+	long ret;
+
+	/* "Equivalent" C:
+
+	   do {
+		ret = ldl_l;
+		--ret;
+		if (ret < 0)
+			break;
+		ret = stl_c = ret;
+	   } while (ret == 0);
+	*/
+	__asm__ __volatile__(
+		"1:	ldl_l	%0,%1\n"
+		"	subl	%0,1,%0\n"
+		"	blt	%0,2f\n"
+		"	stl_c	%0,%1\n"
+		"	beq	%0,3f\n"
+		"	mb\n"
+		"2:\n"
+		".subsection 2\n"
+		"3:	br	1b\n"
+		".previous"
+		: "=&r" (ret), "=m" (sem->count)
+		: "m" (sem->count));
+
+	return ret < 0;
+}
+
+static inline void __up(struct semaphore *sem)
+{
+	if (unlikely(atomic_inc_return(&sem->count) <= 0))
+		__up_wakeup(sem);
+}
+
+#if !defined(CONFIG_DEBUG_SEMAPHORE)
+extern inline void down(struct semaphore *sem)
+{
+	__down(sem);
+}
+extern inline int down_interruptible(struct semaphore *sem)
+{
+	return __down_interruptible(sem);
+}
+extern inline int down_trylock(struct semaphore *sem)
+{
+	return __down_trylock(sem);
+}
+extern inline void up(struct semaphore *sem)
+{
+	__up(sem);
+}
+#endif
+
+#endif
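
In practice nearly all users treat these as sleeping mutexes. A sketch of the canonical pattern (kernel context assumed; the demo names are illustrative):

static DECLARE_MUTEX(demo_sem);		/* count starts at 1 */

static int demo_op(void)
{
	if (down_interruptible(&demo_sem))
		return -ERESTARTSYS;	/* a signal aborted the wait */
	/* ... critical section, may sleep ... */
	up(&demo_sem);
	return 0;
}
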
diff --git a/include/asm-alpha/sembuf.h b/include/asm-alpha/sembuf.h
new file mode 100644
index 0000000..7b38b15
--- /dev/null
+++ b/include/asm-alpha/sembuf.h
@@ -0,0 +1,22 @@
+#ifndef _ALPHA_SEMBUF_H
+#define _ALPHA_SEMBUF_H
+
+/* 
+ * The semid64_ds structure for alpha architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+};
+
+#endif /* _ALPHA_SEMBUF_H */
diff --git a/include/asm-alpha/serial.h b/include/asm-alpha/serial.h
new file mode 100644
index 0000000..7b2d9ee
--- /dev/null
+++ b/include/asm-alpha/serial.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-alpha/serial.h
+ */
+
+#include <linux/config.h>
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#else
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#endif
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define ACCENT_FLAGS 0
+#define BOCA_FLAGS 0
+#endif
+	
+#define STD_SERIAL_PORT_DEFNS			\
+	/* UART CLK   PORT IRQ     FLAGS        */			\
+	{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS },	/* ttyS0 */	\
+	{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS },	/* ttyS1 */	\
+	{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS },	/* ttyS2 */	\
+	{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS },	/* ttyS3 */
+
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define EXTRA_SERIAL_PORT_DEFNS			\
+	{ 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, 	/* ttyS4 */	\
+	{ 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS },	/* ttyS5 */	\
+	{ 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS },	/* ttyS6 */	\
+	{ 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS },	/* ttyS7 */	\
+	{ 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS },	/* ttyS8 */	\
+	{ 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS },	/* ttyS9 */	\
+	{ 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS },	/* ttyS10 */	\
+	{ 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS },	/* ttyS11 */	\
+	{ 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS },	/* ttyS12 */	\
+	{ 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS },	/* ttyS13 */	\
+	{ 0, BASE_BAUD, 0x000, 0, 0 },	/* ttyS14 (spare) */		\
+	{ 0, BASE_BAUD, 0x000, 0, 0 },	/* ttyS15 (spare) */		\
+	{ 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS },	/* ttyS16 */	\
+	{ 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS },	/* ttyS17 */	\
+	{ 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS },	/* ttyS18 */	\
+	{ 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS },	/* ttyS19 */	\
+	{ 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS },	/* ttyS20 */	\
+	{ 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS },	/* ttyS21 */	\
+	{ 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS },	/* ttyS22 */	\
+	{ 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS },	/* ttyS23 */	\
+	{ 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS },	/* ttyS24 */	\
+	{ 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS },	/* ttyS25 */	\
+	{ 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS },	/* ttyS26 */	\
+	{ 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS },	/* ttyS27 */	\
+	{ 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS },	/* ttyS28 */	\
+	{ 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS },	/* ttyS29 */	\
+	{ 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS },	/* ttyS30 */	\
+	{ 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS },	/* ttyS31 */
+#else
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif
+
+#define SERIAL_PORT_DFNS		\
+	STD_SERIAL_PORT_DEFNS		\
+	EXTRA_SERIAL_PORT_DEFNS
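
BASE_BAUD is just the UART clock divided by 16, i.e. 115200 for the 1.8432 MHz crystal, and the divisor latched into the 16550 for a requested rate is BASE_BAUD / rate. A stand-alone sketch of that arithmetic:

#include <stdio.h>

#define BASE_BAUD (1843200 / 16)

int main(void)
{
	static const unsigned int rates[] = { 115200, 57600, 38400, 9600 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u baud -> divisor %u\n",
		       rates[i], BASE_BAUD / rates[i]);
	return 0;
}
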
diff --git a/include/asm-alpha/setup.h b/include/asm-alpha/setup.h
new file mode 100644
index 0000000..2e023a4
--- /dev/null
+++ b/include/asm-alpha/setup.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#define COMMAND_LINE_SIZE	256
+
+#endif
diff --git a/include/asm-alpha/sfp-machine.h b/include/asm-alpha/sfp-machine.h
new file mode 100644
index 0000000..5fe63af
--- /dev/null
+++ b/include/asm-alpha/sfp-machine.h
@@ -0,0 +1,82 @@
+/* Machine-dependent software floating-point definitions.
+   Alpha kernel version.
+   Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Richard Henderson (rth@cygnus.com),
+		  Jakub Jelinek (jakub@redhat.com) and
+		  David S. Miller (davem@redhat.com).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, write to the Free Software Foundation, Inc.,
+   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
+
+#ifndef _SFP_MACHINE_H
+#define _SFP_MACHINE_H
+   
+#define _FP_W_TYPE_SIZE		64
+#define _FP_W_TYPE		unsigned long
+#define _FP_WS_TYPE		signed long
+#define _FP_I_TYPE		long
+
+#define _FP_MUL_MEAT_S(R,X,Y)					\
+  _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
+#define _FP_MUL_MEAT_D(R,X,Y)					\
+  _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y)					\
+  _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S		_FP_QNANBIT_S
+#define _FP_NANFRAC_D		_FP_QNANBIT_D
+#define _FP_NANFRAC_Q		_FP_QNANBIT_Q
+#define _FP_NANSIGN_S		1
+#define _FP_NANSIGN_D		1
+#define _FP_NANSIGN_Q		1
+
+#define _FP_KEEPNANFRACP 1
+
+/* Alpha Architecture Handbook, 4.7.10.4 says that
+ * we should prefer any type of NaN in Fb, then Fa.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+  do {								\
+    R##_s = Y##_s;						\
+    _FP_FRAC_COPY_##wc(R,X);					\
+    R##_c = FP_CLS_NAN;						\
+  } while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE	mode
+#define FP_RND_NEAREST	(FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
+#define FP_RND_ZERO	(FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
+#define FP_RND_PINF	(FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
+#define FP_RND_MINF	(FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)
+
+/* Exception flags. */
+#define FP_EX_INVALID		IEEE_TRAP_ENABLE_INV
+#define FP_EX_OVERFLOW		IEEE_TRAP_ENABLE_OVF
+#define FP_EX_UNDERFLOW		IEEE_TRAP_ENABLE_UNF
+#define FP_EX_DIVZERO		IEEE_TRAP_ENABLE_DZE
+#define FP_EX_INEXACT		IEEE_TRAP_ENABLE_INE
+#define FP_EX_DENORM		IEEE_TRAP_ENABLE_DNO
+
+#define FP_DENORM_ZERO		(swcr & IEEE_MAP_DMZ)
+
+/* We write the results always */
+#define FP_INHIBIT_RESULTS 0
+
+#endif
diff --git a/include/asm-alpha/shmbuf.h b/include/asm-alpha/shmbuf.h
new file mode 100644
index 0000000..37ee84f
--- /dev/null
+++ b/include/asm-alpha/shmbuf.h
@@ -0,0 +1,38 @@
+#ifndef _ALPHA_SHMBUF_H
+#define _ALPHA_SHMBUF_H
+
+/* 
+ * The shmid64_ds structure for alpha architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _ALPHA_SHMBUF_H */
diff --git a/include/asm-alpha/shmparam.h b/include/asm-alpha/shmparam.h
new file mode 100644
index 0000000..cc901d5
--- /dev/null
+++ b/include/asm-alpha/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef _ASMAXP_SHMPARAM_H
+#define _ASMAXP_SHMPARAM_H
+
+#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */
+
+#endif /* _ASMAXP_SHMPARAM_H */
diff --git a/include/asm-alpha/sigcontext.h b/include/asm-alpha/sigcontext.h
new file mode 100644
index 0000000..323cdb0
--- /dev/null
+++ b/include/asm-alpha/sigcontext.h
@@ -0,0 +1,34 @@
+#ifndef _ASMAXP_SIGCONTEXT_H
+#define _ASMAXP_SIGCONTEXT_H
+
+struct sigcontext {
+	/*
+	 * What should we have here? I'd probably better use the same
+	 * stack layout as OSF/1, just in case we ever want to try
+	 * running their binaries.. 
+	 *
+	 * This is the basic layout, but I don't know if we'll ever
+	 * actually fill in all the values..
+	 */
+	 long		sc_onstack;
+	 long		sc_mask;
+	 long		sc_pc;
+	 long		sc_ps;
+	 long		sc_regs[32];
+	 long		sc_ownedfp;
+	 long		sc_fpregs[32];
+	 unsigned long	sc_fpcr;
+	 unsigned long	sc_fp_control;
+	 unsigned long	sc_reserved1, sc_reserved2;
+	 unsigned long	sc_ssize;
+	 char *		sc_sbase;
+	 unsigned long	sc_traparg_a0;
+	 unsigned long	sc_traparg_a1;
+	 unsigned long	sc_traparg_a2;
+	 unsigned long	sc_fp_trap_pc;
+	 unsigned long	sc_fp_trigger_sum;
+	 unsigned long	sc_fp_trigger_inst;
+};
+
+
+#endif
diff --git a/include/asm-alpha/siginfo.h b/include/asm-alpha/siginfo.h
new file mode 100644
index 0000000..86bcab5
--- /dev/null
+++ b/include/asm-alpha/siginfo.h
@@ -0,0 +1,11 @@
+#ifndef _ALPHA_SIGINFO_H
+#define _ALPHA_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE		(4 * sizeof(int))
+#define __ARCH_SI_TRAPNO
+
+#define SIGEV_PAD_SIZE			((SIGEV_MAX_SIZE/sizeof(int)) - 4)
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h
new file mode 100644
index 0000000..25f98bc
--- /dev/null
+++ b/include/asm-alpha/signal.h
@@ -0,0 +1,197 @@
+#ifndef _ASMAXP_SIGNAL_H
+#define _ASMAXP_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+#ifdef __KERNEL__
+/* Digital Unix defines 64 signals.  Most things should be clean enough
+   to redefine this at will, if care is taken to make libc match.  */
+
+#define _NSIG		64
+#define _NSIG_BPW	64
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+#define NSIG		32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Linux/AXP has different signal numbers than Linux/i386: I'm trying
+ * to make it OSF/1 binary compatible, at least for normal binaries.
+ */
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGEMT		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGBUS		10
+#define SIGSEGV		11
+#define SIGSYS		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGURG		16
+#define SIGSTOP		17
+#define SIGTSTP		18
+#define SIGCONT		19
+#define SIGCHLD		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGIO		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGINFO		29
+#define SIGUSR1		30
+#define SIGUSR2		31
+
+#define SIGPOLL	SIGIO
+#define SIGPWR	SIGINFO
+#define SIGIOT	SIGABRT
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago).
+ * SA_NOCLDSTOP turns off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+
+#define SA_ONSTACK	0x00000001
+#define SA_RESTART	0x00000002
+#define SA_NOCLDSTOP	0x00000004
+#define SA_NODEFER	0x00000008
+#define SA_RESETHAND	0x00000010
+#define SA_NOCLDWAIT	0x00000020
+#define SA_SIGINFO	0x00000040
+
+#define SA_ONESHOT	SA_RESETHAND
+#define SA_NOMASK	SA_NODEFER
+#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
+
+/* 
+ * sigaltstack controls
+ */
+#define SS_ONSTACK	1
+#define SS_DISABLE	2
+
+#define MINSIGSTKSZ	4096
+#define SIGSTKSZ	16384
+
+
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE		SA_ONESHOT
+#define SA_SAMPLE_RANDOM	SA_RESTART
+#define SA_SHIRQ		0x40000000
+#endif
+
+#define SIG_BLOCK          1	/* for blocking signals */
+#define SIG_UNBLOCK        2	/* for unblocking signals */
+#define SIG_SETMASK        3	/* for setting the signal mask */
+
+/* Type of a signal handler.  */
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
+#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
+#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+
+#ifdef __KERNEL__
+struct osf_sigaction {
+	__sighandler_t	sa_handler;
+	old_sigset_t	sa_mask;
+	int		sa_flags;
+};
+
+struct sigaction {
+	__sighandler_t	sa_handler;
+	unsigned long	sa_flags;
+	sigset_t	sa_mask;	/* mask last for extensibility */
+};
+
+struct k_sigaction {
+	struct sigaction sa;
+	__sigrestore_t ka_restorer;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+struct sigaction {
+	union {
+	  __sighandler_t	_sa_handler;
+	  void (*_sa_sigaction)(int, struct siginfo *, void *);
+	} _u;
+	sigset_t	sa_mask;
+	int		sa_flags;
+};
+
+#define sa_handler	_u._sa_handler
+#define sa_sigaction	_u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+/* sigstack(2) is deprecated, and will be withdrawn in a future version
+   of the X/Open CAE Specification.  Use sigaltstack instead.  It is only
+   implemented here for OSF/1 compatibility.  */
+
+struct sigstack {
+	void __user *ss_sp;
+	int ss_onstack;
+};
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif
+
+#endif
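
Since the numbering follows OSF/1 (SIGUSR1 is 30 here, not the i386 value 10), anything built for alpha must go through the symbolic names. A minimal user-space sketch:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	write(1, "got SIGUSR1\n", 12);	/* write() is async-signal-safe */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_RESTART;	/* restart interrupted syscalls */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	printf("SIGUSR1 == %d here\n", SIGUSR1);
	raise(SIGUSR1);
	return 0;
}
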
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
new file mode 100644
index 0000000..cbc173a
--- /dev/null
+++ b/include/asm-alpha/smp.h
@@ -0,0 +1,63 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <asm/pal.h>
+
+/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
+
+static __inline__ unsigned char
+__hard_smp_processor_id(void)
+{
+	register unsigned char __r0 __asm__("$0");
+	__asm__ __volatile__(
+		"call_pal %1 #whami"
+		: "=r"(__r0)
+		:"i" (PAL_whami)
+		: "$1", "$22", "$23", "$24", "$25");
+	return __r0;
+}
+
+#ifdef CONFIG_SMP
+
+#include <asm/irq.h>
+
+struct cpuinfo_alpha {
+	unsigned long loops_per_jiffy;
+	unsigned long last_asn;
+	int need_new_asn;
+	int asn_lock;
+	unsigned long ipi_count;
+	unsigned long prof_multiplier;
+	unsigned long prof_counter;
+	unsigned char mcheck_expected;
+	unsigned char mcheck_taken;
+	unsigned char mcheck_extra;
+} __attribute__((aligned(64)));
+
+extern struct cpuinfo_alpha cpu_data[NR_CPUS];
+
+#define PROC_CHANGE_PENALTY     20
+
+#define hard_smp_processor_id()	__hard_smp_processor_id()
+#define smp_processor_id()	(current_thread_info()->cpu)
+
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_online_map;
+extern int smp_num_cpus;
+#define cpu_possible_map	cpu_present_mask
+
+int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
+
+#else /* CONFIG_SMP */
+
+#define smp_call_function_on_cpu(func,info,retry,wait,cpu)    ({ 0; })
+
+#endif /* CONFIG_SMP */
+
+#define NO_PROC_ID	(-1)
+
+#endif
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
new file mode 100644
index 0000000..d00259d
--- /dev/null
+++ b/include/asm-alpha/socket.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+/*
+ * Note: we only bother to make the SOL_SOCKET options the
+ * same as OSF/1, as that's all that "normal" programs are
+ * likely to set.  We don't necessarily want to be binary
+ * compatible with _everything_. 
+ */
+#define SOL_SOCKET	0xffff
+
+#define SO_DEBUG	0x0001
+#define SO_REUSEADDR	0x0004
+#define SO_KEEPALIVE	0x0008
+#define SO_DONTROUTE	0x0010
+#define SO_BROADCAST	0x0020
+#define SO_LINGER	0x0080
+#define SO_OOBINLINE	0x0100
+/* To add :#define SO_REUSEPORT 0x0200 */
+
+#define SO_TYPE		0x1008
+#define SO_ERROR	0x1007
+#define SO_SNDBUF	0x1001
+#define SO_RCVBUF	0x1002
+#define	SO_RCVLOWAT	0x1010
+#define	SO_SNDLOWAT	0x1011
+#define	SO_RCVTIMEO	0x1012
+#define	SO_SNDTIMEO	0x1013
+#define SO_ACCEPTCONN	0x1014
+
+/* linux-specific, might as well be the same as on i386 */
+#define SO_NO_CHECK	11
+#define SO_PRIORITY	12
+#define SO_BSDCOMPAT	14
+
+#define SO_PASSCRED	17
+#define SO_PEERCRED	18
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER        26
+#define SO_DETACH_FILTER        27
+
+#define SO_PEERNAME		28
+#define SO_TIMESTAMP		29
+#define SCM_TIMESTAMP		SO_TIMESTAMP
+
+#define SO_PEERSEC		30
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		19
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	20
+#define SO_SECURITY_ENCRYPTION_NETWORK		21
+
+#endif /* _ASM_SOCKET_H */
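
Again the OSF/1 values are an ABI detail; source code is identical across architectures as long as it uses the names. A minimal user-space sketch:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;

	/* on alpha this passes level 0xffff, option 0x0004 */
	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
		perror("SO_REUSEADDR");
	return 0;
}
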
diff --git a/include/asm-alpha/sockios.h b/include/asm-alpha/sockios.h
new file mode 100644
index 0000000..e4961a7
--- /dev/null
+++ b/include/asm-alpha/sockios.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_ALPHA_SOCKIOS_H
+#define _ASM_ALPHA_SOCKIOS_H
+
+/* Socket-level I/O control calls. */
+
+#define FIOGETOWN	_IOR('f', 123, int)
+#define FIOSETOWN 	_IOW('f', 124, int)
+
+#define SIOCATMARK	_IOR('s', 7, int)
+#define SIOCSPGRP	_IOW('s', 8, pid_t)
+#define SIOCGPGRP	_IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP	0x8906		/* Get stamp - linux-specific */
+
+#endif /* _ASM_ALPHA_SOCKIOS_H */
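
These are ordinary ioctls on a socket file descriptor. A sketch using SIOCATMARK, which reports whether the stream is at the TCP urgent-data mark (at_oob_mark is an illustrative helper):

#include <stdio.h>
#include <sys/ioctl.h>

static int at_oob_mark(int sockfd)
{
	int atmark = 0;

	if (ioctl(sockfd, SIOCATMARK, &atmark) < 0) {
		perror("SIOCATMARK");
		return -1;
	}
	return atmark;	/* 1: next read starts at the mark */
}
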
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
new file mode 100644
index 0000000..80780db
--- /dev/null
+++ b/include/asm-alpha/spinlock.h
@@ -0,0 +1,212 @@
+#ifndef _ALPHA_SPINLOCK_H
+#define _ALPHA_SPINLOCK_H
+
+#include <linux/config.h>
+#include <asm/system.h>
+#include <linux/kernel.h>
+#include <asm/current.h>
+
+
+/*
+ * Simple spin lock operations.  There are two variants: one clears IRQs
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+typedef struct {
+	volatile unsigned int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	int on_cpu;
+	int line_no;
+	void *previous;
+	struct task_struct * task;
+	const char *base_file;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPIN_LOCK_UNLOCKED	(spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
+#else
+#define SPIN_LOCK_UNLOCKED	(spinlock_t){ 0 }
+#endif
+
+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+#define spin_is_locked(x)	((x)->lock != 0)
+#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+extern void _raw_spin_unlock(spinlock_t * lock);
+extern void debug_spin_lock(spinlock_t * lock, const char *, int);
+extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
+#define _raw_spin_lock(LOCK) \
+	debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_trylock(LOCK) \
+	debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
+#else
+static inline void _raw_spin_unlock(spinlock_t * lock)
+{
+	mb();
+	lock->lock = 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t * lock)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%0,%1\n"
+	"	bne	%0,2f\n"
+	"	lda	%0,1\n"
+	"	stl_c	%0,%1\n"
+	"	beq	%0,2f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"2:	ldl	%0,%1\n"
+	"	bne	%0,2b\n"
+	"	br	1b\n"
+	".previous"
+	: "=&r" (tmp), "=m" (lock->lock)
+	: "m"(lock->lock) : "memory");
+}
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	return !test_and_set_bit(0, &lock->lock);
+}
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+
+/***********************************************************/
+
+typedef struct {
+	volatile unsigned int lock;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED	(rwlock_t){ 0 }
+
+#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+static inline int read_can_lock(rwlock_t *lock)
+{
+	return (lock->lock & 1) == 0;
+}
+
+static inline int write_can_lock(rwlock_t *lock)
+{
+	return lock->lock == 0;
+}
+
+#ifdef CONFIG_DEBUG_RWLOCK
+extern void _raw_write_lock(rwlock_t * lock);
+extern void _raw_read_lock(rwlock_t * lock);
+#else
+static inline void _raw_write_lock(rwlock_t * lock)
+{
+	long regx;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	bne	%1,6f\n"
+	"	lda	%1,1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"6:	ldl	%1,%0\n"
+	"	bne	%1,6b\n"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+
+static inline void _raw_read_lock(rwlock_t * lock)
+{
+	long regx;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	blbs	%1,6f\n"
+	"	subl	%1,2,%1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	"	mb\n"
+	".subsection 2\n"
+	"6:	ldl	%1,%0\n"
+	"	blbs	%1,6b\n"
+	"	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+#endif /* CONFIG_DEBUG_RWLOCK */
+
+static inline int _raw_read_trylock(rwlock_t * lock)
+{
+	long regx;
+	int success;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	lda	%2,0\n"
+	"	blbs	%1,2f\n"
+	"	subl	%1,2,%2\n"
+	"	stl_c	%2,%0\n"
+	"	beq	%2,6f\n"
+	"2:	mb\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx), "=&r" (success)
+	: "m" (*lock) : "memory");
+
+	return success;
+}
+
+static inline int _raw_write_trylock(rwlock_t * lock)
+{
+	long regx;
+	int success;
+
+	__asm__ __volatile__(
+	"1:	ldl_l	%1,%0\n"
+	"	lda	%2,0\n"
+	"	bne	%1,2f\n"
+	"	lda	%2,1\n"
+	"	stl_c	%2,%0\n"
+	"	beq	%2,6f\n"
+	"2:	mb\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx), "=&r" (success)
+	: "m" (*lock) : "memory");
+
+	return success;
+}
+
+static inline void _raw_write_unlock(rwlock_t * lock)
+{
+	mb();
+	lock->lock = 0;
+}
+
+static inline void _raw_read_unlock(rwlock_t * lock)
+{
+	long regx;
+	__asm__ __volatile__(
+	"	mb\n"
+	"1:	ldl_l	%1,%0\n"
+	"	addl	%1,2,%1\n"
+	"	stl_c	%1,%0\n"
+	"	beq	%1,6f\n"
+	".subsection 2\n"
+	"6:	br	1b\n"
+	".previous"
+	: "=m" (*lock), "=&r" (regx)
+	: "m" (*lock) : "memory");
+}
+
+#endif /* _ALPHA_SPINLOCK_H */
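
The _raw_ primitives above are what the generic wrappers in linux/spinlock.h expand to; drivers use the wrappers. A sketch of the usual pattern (kernel context; demo names are illustrative):

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* lock and mask local IRQs */
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}
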
diff --git a/include/asm-alpha/stat.h b/include/asm-alpha/stat.h
new file mode 100644
index 0000000..07ad3e6
--- /dev/null
+++ b/include/asm-alpha/stat.h
@@ -0,0 +1,48 @@
+#ifndef _ALPHA_STAT_H
+#define _ALPHA_STAT_H
+
+struct stat {
+	unsigned int	st_dev;
+	unsigned int	st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned int	st_rdev;
+	long		st_size;
+	unsigned long	st_atime;
+	unsigned long	st_mtime;
+	unsigned long	st_ctime;
+	unsigned int	st_blksize;
+	unsigned int	st_blocks;
+	unsigned int	st_flags;
+	unsigned int	st_gen;
+};
+
+/* The stat64 structure increases the size of dev_t, blkcnt_t, adds
+   nanosecond resolution times, and padding for expansion.  */
+
+struct stat64 {
+	unsigned long	st_dev;
+	unsigned long	st_ino;
+	unsigned long	st_rdev;
+	long		st_size;
+	unsigned long	st_blocks;
+
+	unsigned int	st_mode;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned int	st_blksize;
+	unsigned int	st_nlink;
+	unsigned int	__pad0;
+
+	unsigned long	st_atime;
+	unsigned long 	st_atime_nsec; 
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+	unsigned long	st_ctime;
+	unsigned long   st_ctime_nsec;
+  	long		__unused[3];
+};
+
+#endif
diff --git a/include/asm-alpha/statfs.h b/include/asm-alpha/statfs.h
new file mode 100644
index 0000000..ad15830
--- /dev/null
+++ b/include/asm-alpha/statfs.h
@@ -0,0 +1,6 @@
+#ifndef _ALPHA_STATFS_H
+#define _ALPHA_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
new file mode 100644
index 0000000..9e44fea
--- /dev/null
+++ b/include/asm-alpha/string.h
@@ -0,0 +1,68 @@
+#ifndef __ALPHA_STRING_H__
+#define __ALPHA_STRING_H__
+
+#ifdef __KERNEL__
+
+/*
+ * GCC of any recent vintage doesn't do stupid things with bcopy.
+ * EGCS 1.1 knows all about expanding memcpy inline, others don't.
+ *
+ * Similarly for a memset with data = 0.
+ */
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *, const void *, size_t);
+
+/* For backward compatibility with modules.  Unused otherwise.  */
+extern void * __memcpy(void *, const void *, size_t);
+
+#define memcpy __builtin_memcpy
+
+#define __HAVE_ARCH_MEMSET
+extern void * __constant_c_memset(void *, unsigned long, size_t);
+extern void * __memset(void *, int, size_t);
+extern void * memset(void *, int, size_t);
+
+#define memset(s, c, n)							    \
+(__builtin_constant_p(c)						    \
+ ? (__builtin_constant_p(n) && (c) == 0					    \
+    ? __builtin_memset((s),0,(n)) 					    \
+    : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
+ : __memset((s),(c),(n)))
+
+#define __HAVE_ARCH_STRCPY
+extern char * strcpy(char *,const char *);
+#define __HAVE_ARCH_STRNCPY
+extern char * strncpy(char *, const char *, size_t);
+#define __HAVE_ARCH_STRCAT
+extern char * strcat(char *, const char *);
+#define __HAVE_ARCH_STRNCAT
+extern char * strncat(char *, const char *, size_t);
+#define __HAVE_ARCH_STRCHR
+extern char * strchr(const char *,int);
+#define __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char *,int);
+#define __HAVE_ARCH_STRLEN
+extern size_t strlen(const char *);
+#define __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *, int, size_t);
+
+/* The following routine is like memset except that it writes 16-bit
+   aligned values.  The DEST and COUNT parameters must be even for 
+   correct operation.  */
+
+#define __HAVE_ARCH_MEMSETW
+extern void * __memsetw(void *dest, unsigned short, size_t count);
+
+#define memsetw(s, c, n)						 \
+(__builtin_constant_p(c)						 \
+ ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \
+ : __memsetw((s),(c),(n)))
+
+extern int strcasecmp(const char *, const char *);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_STRING_H__ */
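
The magic multipliers in the two macros replicate the fill value across a 64-bit word: multiplying a byte by 0x0101010101010101 copies it into all eight byte lanes (likewise 0x0001000100010001 for 16-bit values), which is what lets __constant_c_memset store whole quadwords. A user-space demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char c = 0xab;
	unsigned long word = 0x0101010101010101UL * c;

	printf("%#lx\n", word);		/* prints 0xabababababababab */
	return 0;
}
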
diff --git a/include/asm-alpha/suspend.h b/include/asm-alpha/suspend.h
new file mode 100644
index 0000000..c7042d5
--- /dev/null
+++ b/include/asm-alpha/suspend.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_SUSPEND_H
+#define __ALPHA_SUSPEND_H
+
+/* Dummy include. */
+
+#endif  /* __ALPHA_SUSPEND_H */
diff --git a/include/asm-alpha/sysinfo.h b/include/asm-alpha/sysinfo.h
new file mode 100644
index 0000000..086aba2
--- /dev/null
+++ b/include/asm-alpha/sysinfo.h
@@ -0,0 +1,39 @@
+/*
+ * include/asm-alpha/sysinfo.h
+ */
+
+#ifndef __ASM_ALPHA_SYSINFO_H
+#define __ASM_ALPHA_SYSINFO_H
+
+/* This defines the subset of the OSF/1 getsysinfo/setsysinfo calls
+   that we support.  */
+
+#define GSI_UACPROC			8
+#define GSI_IEEE_FP_CONTROL		45
+#define GSI_IEEE_STATE_AT_SIGNAL	46
+#define GSI_PROC_TYPE			60
+#define GSI_GET_HWRPB			101
+
+#define SSI_NVPAIRS			1
+#define SSI_IEEE_FP_CONTROL		14
+#define SSI_IEEE_STATE_AT_SIGNAL	15
+#define SSI_IEEE_IGNORE_STATE_AT_SIGNAL	16
+#define SSI_IEEE_RAISE_EXCEPTION	1001	/* linux specific */
+
+#define SSIN_UACPROC			6
+
+#define UAC_BITMASK			7
+#define UAC_NOPRINT			1
+#define UAC_NOFIX			2
+#define UAC_SIGBUS			4
+
+
+#ifdef __KERNEL__
+
+/* This is the shift that is applied to the UAC bits as stored in the
+   per-thread flags.  See thread_info.h.  */
+#define UAC_SHIFT			6
+
+#endif
+
+#endif /* __ASM_ALPHA_SYSINFO_H */
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
new file mode 100644
index 0000000..c08ce97
--- /dev/null
+++ b/include/asm-alpha/system.h
@@ -0,0 +1,626 @@
+#ifndef __ALPHA_SYSTEM_H
+#define __ALPHA_SYSTEM_H
+
+#include <linux/config.h>
+#include <asm/pal.h>
+#include <asm/page.h>
+
+/*
+ * System defines.. Note that this is included both from .c and .S
+ * files, so it contains only defines, not any C code.
+ */
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB	0x20000000
+#define BOOT_ADDR	0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE	(16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this.  */
+#else
+#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD	KERNEL_START
+#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is set up by the secondary bootstrap loader.  Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM			ZERO_PGE
+#define COMMAND_LINE		((char*)(PARAM + 0x0000))
+#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
+/*
+ * This is the logout header that should be common to all platforms
+ * (assuming they are running OSF/1 PALcode, I guess).
+ */
+struct el_common {
+	unsigned int	size;		/* size in bytes of logout area */
+	unsigned int	sbz1	: 30;	/* should be zero */
+	unsigned int	err2	:  1;	/* second error */
+	unsigned int	retry	:  1;	/* retry flag */
+	unsigned int	proc_offset;	/* processor-specific offset */
+	unsigned int	sys_offset;	/* system-specific offset */
+	unsigned int	code;		/* machine check code */
+	unsigned int	frame_rev;	/* frame revision */
+};
+
+/* Machine Check Frame for uncorrectable errors (Large format)
+ *      --- This is used to log uncorrectable errors such as
+ *          double bit ECC errors.
+ *      --- These errors are detected by both processor and systems.
+ */
+struct el_common_EV5_uncorrectable_mcheck {
+        unsigned long   shadow[8];        /* Shadow reg. 8-14, 25           */
+        unsigned long   paltemp[24];      /* PAL TEMP REGS.                 */
+        unsigned long   exc_addr;         /* Address of excepting instruction*/
+        unsigned long   exc_sum;          /* Summary of arithmetic traps.   */
+        unsigned long   exc_mask;         /* Exception mask (from exc_sum). */
+        unsigned long   pal_base;         /* Base address for PALcode.      */
+        unsigned long   isr;              /* Interrupt Status Reg.          */
+        unsigned long   icsr;             /* CURRENT SETUP OF EV5 IBOX      */
+        unsigned long   ic_perr_stat;     /* I-CACHE Reg. <11> set Data parity
+                                                         <12> set TAG parity*/
+        unsigned long   dc_perr_stat;     /* D-CACHE error Reg. Bits set to 1:
+                                                     <2> Data error in bank 0
+                                                     <3> Data error in bank 1
+                                                     <4> Tag error in bank 0
+                                                     <5> Tag error in bank 1 */
+        unsigned long   va;               /* Effective VA of fault or miss. */
+        unsigned long   mm_stat;          /* Holds the reason for D-stream 
+                                             fault or D-cache parity errors */
+        unsigned long   sc_addr;          /* Address that was being accessed
+                                             when EV5 detected Secondary cache
+                                             failure.                 */
+        unsigned long   sc_stat;          /* Helps determine if the error was
+                                             TAG/Data parity(Secondary Cache)*/
+        unsigned long   bc_tag_addr;      /* Contents of EV5 BC_TAG_ADDR    */
+        unsigned long   ei_addr;          /* Physical address of any transfer
+                                             that is logged in EV5 EI_STAT */
+        unsigned long   fill_syndrome;    /* For correcting ECC errors.     */
+        unsigned long   ei_stat;          /* Helps identify reason of any 
+                                             processor uncorrectable error
+                                             at its external interface.     */
+        unsigned long   ld_lock;          /* Contents of EV5 LD_LOCK register*/
+};
+
+struct el_common_EV6_mcheck {
+	unsigned int FrameSize;		/* Bytes, including this field */
+	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
+	unsigned int CpuOffset;		/* Offset to CPU-specific info */
+	unsigned int SystemOffset;	/* Offset to system-specific info */
+	unsigned int MCHK_Code;
+	unsigned int MCHK_Frame_Rev;
+	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
+	unsigned long DC_STAT;		/* (See the 21264 Spec) */
+	unsigned long C_ADDR;
+	unsigned long DC1_SYNDROME;
+	unsigned long DC0_SYNDROME;
+	unsigned long C_STAT;
+	unsigned long C_STS;
+	unsigned long MM_STAT;
+	unsigned long EXC_ADDR;
+	unsigned long IER_CM;
+	unsigned long ISUM;
+	unsigned long RESERVED0;
+	unsigned long PAL_BASE;
+	unsigned long I_CTL;
+	unsigned long PCTX;
+};
+
+extern void halt(void) __attribute__((noreturn));
+#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
+
+#define switch_to(P,N,L)						\
+  do {									\
+    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P));	\
+    check_mmu_context();						\
+  } while (0)
+
+struct task_struct;
+extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
+
+#define mb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define rmb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define wmb() \
+__asm__ __volatile__("wmb": : :"memory")
+
+#define read_barrier_depends() \
+__asm__ __volatile__("mb": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	barrier()
+#endif
+
+#define set_mb(var, value) \
+do { var = value; mb(); } while (0)
+
+#define set_wmb(var, value) \
+do { var = value; wmb(); } while (0)
+
+#define imb() \
+__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
+
+#define draina() \
+__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
+
+enum implver_enum {
+	IMPLVER_EV4,
+	IMPLVER_EV5,
+	IMPLVER_EV6
+};
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define implver()				\
+({ unsigned long __implver;			\
+   __asm__ ("implver %0" : "=r"(__implver));	\
+   (enum implver_enum) __implver; })
+#else
+/* Try to eliminate some dead code.  */
+#ifdef CONFIG_ALPHA_EV4
+#define implver() IMPLVER_EV4
+#endif
+#ifdef CONFIG_ALPHA_EV5
+#define implver() IMPLVER_EV5
+#endif
+#if defined(CONFIG_ALPHA_EV6)
+#define implver() IMPLVER_EV6
+#endif
+#endif
+
+enum amask_enum {
+	AMASK_BWX = (1UL << 0),
+	AMASK_FIX = (1UL << 1),
+	AMASK_CIX = (1UL << 2),
+	AMASK_MAX = (1UL << 8),
+	AMASK_PRECISE_TRAP = (1UL << 9),
+};
+
+#define amask(mask)						\
+({ unsigned long __amask, __input = (mask);			\
+   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
+   __amask; })
+
+#define __CALL_PAL_R0(NAME, TYPE)				\
+static inline TYPE NAME(void)					\
+{								\
+	register TYPE __r0 __asm__("$0");			\
+	__asm__ __volatile__(					\
+		"call_pal %1 # " #NAME				\
+		:"=r" (__r0)					\
+		:"i" (PAL_ ## NAME)				\
+		:"$1", "$16", "$22", "$23", "$24", "$25");	\
+	return __r0;						\
+}
+
+#define __CALL_PAL_W1(NAME, TYPE0)				\
+static inline void NAME(TYPE0 arg0)				\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %1 # "#NAME				\
+		: "=r"(__r16)					\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
+
+#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
+static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
+{								\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17)			\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+}
+
+#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
+static inline RTYPE NAME(TYPE0 arg0)				\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	__asm__ __volatile__(					\
+		"call_pal %2 # "#NAME				\
+		: "=r"(__r16), "=r"(__r0)			\
+		: "i"(PAL_ ## NAME), "0"(__r16)			\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
+
+#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
+static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
+{								\
+	register RTYPE __r0 __asm__("$0");			\
+	register TYPE0 __r16 __asm__("$16") = arg0;		\
+	register TYPE1 __r17 __asm__("$17") = arg1;		\
+	__asm__ __volatile__(					\
+		"call_pal %3 # "#NAME				\
+		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
+		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
+		: "$1", "$22", "$23", "$24", "$25");		\
+	return __r0;						\
+}
+
+__CALL_PAL_W1(cflush, unsigned long);
+__CALL_PAL_R0(rdmces, unsigned long);
+__CALL_PAL_R0(rdps, unsigned long);
+__CALL_PAL_R0(rdusp, unsigned long);
+__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
+__CALL_PAL_R0(whami, unsigned long);
+__CALL_PAL_W2(wrent, void*, unsigned long);
+__CALL_PAL_W1(wripir, unsigned long);
+__CALL_PAL_W1(wrkgp, unsigned long);
+__CALL_PAL_W1(wrmces, unsigned long);
+__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
+__CALL_PAL_W1(wrusp, unsigned long);
+__CALL_PAL_W1(wrvptptr, unsigned long);
+
+#define IPL_MIN		0
+#define IPL_SW0		1
+#define IPL_SW1		2
+#define IPL_DEV0	3
+#define IPL_DEV1	4
+#define IPL_TIMER	5
+#define IPL_PERF	6
+#define IPL_POWERFAIL	6
+#define IPL_MCHECK	7
+#define IPL_MAX		7
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+#undef IPL_MIN
+#define IPL_MIN		__min_ipl
+extern int __min_ipl;
+#endif
+
+#define getipl()		(rdps() & 7)
+#define setipl(ipl)		((void) swpipl(ipl))
+
+#define local_irq_disable()			do { setipl(IPL_MAX); barrier(); } while(0)
+#define local_irq_enable()			do { barrier(); setipl(IPL_MIN); } while(0)
+#define local_save_flags(flags)	((flags) = rdps())
+#define local_irq_save(flags)	do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
+#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)
+
+#define irqs_disabled()	(getipl() == IPL_MAX)
+
+/*
+ * TB routines..
+ */
+#define __tbi(nr,arg,arg1...)					\
+({								\
+	register unsigned long __r16 __asm__("$16") = (nr);	\
+	register unsigned long __r17 __asm__("$17"); arg;	\
+	__asm__ __volatile__(					\
+		"call_pal %3 #__tbi"				\
+		:"=r" (__r16),"=r" (__r17)			\
+		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
+		:"$0", "$1", "$22", "$23", "$24", "$25");	\
+})
+
+#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
+#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
+#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
+#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
+#define tbiap()		__tbi(-1, /* no second argument */)
+#define tbia()		__tbi(-2, /* no second argument */)
+
+/*
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
+ */
+
+static inline unsigned long
+__xchg_u8(volatile char *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	insbl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extbl	%2,%4,%0\n"
+	"	mskbl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+__xchg_u16(volatile short *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	inswl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extwl	%2,%4,%0\n"
+	"	mskwl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+static inline unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,2f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid xchg().  */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, int size)
+{
+	switch (size) {
+		case 1:
+			return __xchg_u8(ptr, x);
+		case 2:
+			return __xchg_u16(ptr, x);
+		case 4:
+			return __xchg_u32(ptr, x);
+		case 8:
+			return __xchg_u64(ptr, x);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define xchg(ptr,x)							     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+  })
+
+#define tas(ptr) (xchg((ptr),1))
+
+
+/* 
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ *
+ * The memory barrier should be placed in SMP only when we actually
+ * make the change. If we don't change anything (so if the returned
+ * prev is equal to old) then we aren't acquiring anything new and
+ * we don't need any memory barrier as far as I can tell.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long
+__cmpxchg_u8(volatile char *m, long old, long new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	insbl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extbl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskbl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u16(volatile short *m, long old, long new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	inswl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extwl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskwl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,3f\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+		case 1:
+			return __cmpxchg_u8(ptr, old, new);
+		case 2:
+			return __cmpxchg_u16(ptr, old, new);
+		case 4:
+			return __cmpxchg_u32(ptr, old, new);
+		case 8:
+			return __cmpxchg_u64(ptr, old, new);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
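+
+/* Illustrative sketch (not part of the original header): a lock-free
+   increment using cmpxchg().  As the comment above says, success is
+   detected by comparing the returned value with the expected old one.
+   The function name is hypothetical.  */
+static inline long example_atomic_inc(volatile long *counter)
+{
+	long old, new;
+
+	do {
+		old = *counter;		/* snapshot the current value */
+		new = old + 1;
+		/* retry if another CPU changed *counter in between */
+	} while (cmpxchg(counter, old, new) != old);
+
+	return new;
+}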
+
+#endif /* __ASSEMBLY__ */
+
+#define arch_align_stack(x) (x)
+
+#endif
diff --git a/include/asm-alpha/termbits.h b/include/asm-alpha/termbits.h
new file mode 100644
index 0000000..f4837fa
--- /dev/null
+++ b/include/asm-alpha/termbits.h
@@ -0,0 +1,186 @@
+#ifndef _ALPHA_TERMBITS_H
+#define _ALPHA_TERMBITS_H
+
+#include <linux/posix_types.h>
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+/*
+ * termios type and macro definitions.  Be careful about adding stuff
+ * to this file since it's used in GNU libc and there are strict rules
+ * concerning namespace pollution.
+ */
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_cc[NCCS];		/* control characters */
+	cc_t c_line;			/* line discipline (== c_cc[19]) */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+/* c_cc characters */
+#define VEOF 0
+#define VEOL 1
+#define VEOL2 2
+#define VERASE 3
+#define VWERASE 4
+#define VKILL 5
+#define VREPRINT 6
+#define VSWTC 7
+#define VINTR 8
+#define VQUIT 9
+#define VSUSP 10
+#define VSTART 12
+#define VSTOP 13
+#define VLNEXT 14
+#define VDISCARD 15
+#define VMIN 16
+#define VTIME 17
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IXON	0001000
+#define IXOFF	0002000
+#define IXANY	0004000
+#define IUCLC	0010000
+#define IMAXBEL	0020000
+#define IUTF8	0040000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define ONLCR	0000002
+#define OLCUC	0000004
+
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+
+#define OFILL	00000100
+#define OFDEL	00000200
+#define NLDLY	00001400
+#define   NL0	00000000
+#define   NL1	00000400
+#define   NL2	00001000
+#define   NL3	00001400
+#define TABDLY	00006000
+#define   TAB0	00000000
+#define   TAB1	00002000
+#define   TAB2	00004000
+#define   TAB3	00006000
+#define CRDLY	00030000
+#define   CR0	00000000
+#define   CR1	00010000
+#define   CR2	00020000
+#define   CR3	00030000
+#define FFDLY	00040000
+#define   FF0	00000000
+#define   FF1	00040000
+#define BSDLY	00100000
+#define   BS0	00000000
+#define   BS1	00100000
+#define VTDLY	00200000
+#define   VT0	00000000
+#define   VT1	00200000
+#define XTABS	01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */
+
+/* c_cflag bit meaning */
+#define CBAUD	0000037
+#define  B0	0000000		/* hang up */
+#define  B50	0000001
+#define  B75	0000002
+#define  B110	0000003
+#define  B134	0000004
+#define  B150	0000005
+#define  B200	0000006
+#define  B300	0000007
+#define  B600	0000010
+#define  B1200	0000011
+#define  B1800	0000012
+#define  B2400	0000013
+#define  B4800	0000014
+#define  B9600	0000015
+#define  B19200	0000016
+#define  B38400	0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CBAUDEX 0000000
+#define  B57600   00020
+#define  B115200  00021
+#define  B230400  00022
+#define  B460800  00023
+#define  B500000  00024
+#define  B576000  00025
+#define  B921600  00026
+#define B1000000  00027
+#define B1152000  00030
+#define B1500000  00031
+#define B2000000  00032
+#define B2500000  00033
+#define B3000000  00034
+#define B3500000  00035
+#define B4000000  00036
+
+#define CSIZE	00001400
+#define   CS5	00000000
+#define   CS6	00000400
+#define   CS7	00001000
+#define   CS8	00001400
+
+#define CSTOPB	00002000
+#define CREAD	00004000
+#define PARENB	00010000
+#define PARODD	00020000
+#define HUPCL	00040000
+
+#define CLOCAL	00100000
+#define CRTSCTS	  020000000000		/* flow control */
+
+/* c_lflag bits */
+#define ISIG	0x00000080
+#define ICANON	0x00000100
+#define XCASE	0x00004000
+#define ECHO	0x00000008
+#define ECHOE	0x00000002
+#define ECHOK	0x00000004
+#define ECHONL	0x00000010
+#define NOFLSH	0x80000000
+#define TOSTOP	0x00400000
+#define ECHOCTL	0x00000040
+#define ECHOPRT	0x00000020
+#define ECHOKE	0x00000001
+#define FLUSHO	0x00800000
+#define PENDIN	0x20000000
+#define IEXTEN	0x00000400
+
+/* Values for the ACTION argument to `tcflow'.  */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* Values for the QUEUE_SELECTOR argument to `tcflush'.  */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'.  */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
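+
+/* Illustrative sketch (not part of the original header): how the bit
+   groups above combine to describe a raw 9600 8N1 line.  The helper
+   name is hypothetical; a real program would route the result through
+   tcgetattr()/tcsetattr().  */
+static inline void example_make_raw_9600(struct termios *t)
+{
+	t->c_iflag = 0;				/* no input mangling */
+	t->c_oflag &= ~OPOST;			/* no output post-processing */
+	t->c_lflag &= ~(ICANON | ECHO | ISIG);	/* non-canonical, quiet */
+	t->c_cflag = (t->c_cflag & ~(CSIZE | PARENB | CBAUD))
+		     | CS8 | CREAD | CLOCAL | B9600;
+	t->c_cc[VMIN]  = 1;	/* return after one byte... */
+	t->c_cc[VTIME] = 0;	/* ...with no inter-byte timeout */
+}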
+
+#endif /* _ALPHA_TERMBITS_H */
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
new file mode 100644
index 0000000..1cfd27f
--- /dev/null
+++ b/include/asm-alpha/termios.h
@@ -0,0 +1,164 @@
+#ifndef _ALPHA_TERMIOS_H
+#define _ALPHA_TERMIOS_H
+
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
+
+struct sgttyb {
+	char	sg_ispeed;
+	char	sg_ospeed;
+	char	sg_erase;
+	char	sg_kill;
+	short	sg_flags;
+};
+
+struct tchars {
+	char	t_intrc;
+	char	t_quitc;
+	char	t_startc;
+	char	t_stopc;
+	char	t_eofc;
+	char	t_brkc;
+};
+
+struct ltchars {
+	char	t_suspc;
+	char	t_dsuspc;
+	char	t_rprntc;
+	char	t_flushc;
+	char	t_werasc;
+	char	t_lnextc;
+};
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/*
+ * c_cc characters in the termio structure.  Oh, how I love being
+ * backwards compatible.  Notice that characters 4 and 5 are
+ * interpreted differently depending on whether ICANON is set in
+ * c_lflag.  If it's set, they are used as _VEOF and _VEOL, otherwise
+ * as _VMIN and _VTIME.  This is for compatibility with OSF/1 (which
+ * is compatible with sysV)...
+ */
+#define _VINTR	0
+#define _VQUIT	1
+#define _VERASE	2
+#define _VKILL	3
+#define _VEOF	4
+#define _VMIN	4
+#define _VEOL	5
+#define _VTIME	5
+#define _VEOL2	6
+#define _VSWTC	7
+
+/* line disciplines */
+#define N_TTY		0
+#define N_SLIP		1
+#define N_MOUSE		2
+#define N_PPP		3
+#define N_STRIP		4
+#define N_AX25		5
+#define N_X25		6	/* X.25 async */
+#define N_6PACK		7
+#define N_MASC		8	/* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964		9	/* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL	10	/* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA		11	/* Linux IrDA - http://irda.sourceforge.net/ */
+#define N_SMSBLOCK	12	/* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC		13	/* synchronous HDLC */
+#define N_SYNC_PPP	14
+#define N_HCI		15  /* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+/*	eof=^D		eol=\0		eol2=\0		erase=del
+	werase=^W	kill=^U		reprint=^R	sxtc=\0
+	intr=^C		quit=^\		susp=^Z		<OSF/1 VDSUSP>
+	start=^Q	stop=^S		lnext=^V	discard=^U
+	vmin=\1		vtime=\0
+*/
+#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+
+#define user_termio_to_kernel_termios(a_termios, u_termio)			\
+({										\
+	struct termios *k_termios = (a_termios);				\
+	struct termio k_termio;							\
+	int canon, ret;								\
+										\
+	ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio));		\
+	if (!ret) {								\
+		/* Overwrite only the low bits.  */				\
+		*(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag;	\
+		*(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag;	\
+		*(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag;	\
+		*(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag;	\
+		canon = k_termio.c_lflag & ICANON;				\
+										\
+		k_termios->c_cc[VINTR]  = k_termio.c_cc[_VINTR];		\
+		k_termios->c_cc[VQUIT]  = k_termio.c_cc[_VQUIT];		\
+		k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE];		\
+		k_termios->c_cc[VKILL]  = k_termio.c_cc[_VKILL];		\
+		k_termios->c_cc[VEOL2]  = k_termio.c_cc[_VEOL2];		\
+		k_termios->c_cc[VSWTC]  = k_termio.c_cc[_VSWTC];		\
+		k_termios->c_cc[canon ? VEOF : VMIN]  = k_termio.c_cc[_VEOF];	\
+		k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL];	\
+	}									\
+	ret;									\
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ *
+ * Note the "fun" _VMIN overloading.
+ */
+#define kernel_termios_to_user_termio(u_termio, a_termios)		\
+({									\
+	struct termios *k_termios = (a_termios);			\
+	struct termio k_termio;						\
+	int canon;							\
+									\
+	k_termio.c_iflag = k_termios->c_iflag;				\
+	k_termio.c_oflag = k_termios->c_oflag;				\
+	k_termio.c_cflag = k_termios->c_cflag;				\
+	canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON;	\
+									\
+	k_termio.c_line = k_termios->c_line;				\
+	k_termio.c_cc[_VINTR]  = k_termios->c_cc[VINTR];		\
+	k_termio.c_cc[_VQUIT]  = k_termios->c_cc[VQUIT];		\
+	k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE];		\
+	k_termio.c_cc[_VKILL]  = k_termios->c_cc[VKILL];		\
+	k_termio.c_cc[_VEOF]   = k_termios->c_cc[canon ? VEOF : VMIN];	\
+	k_termio.c_cc[_VEOL]   = k_termios->c_cc[canon ? VEOL : VTIME];	\
+	k_termio.c_cc[_VEOL2]  = k_termios->c_cc[VEOL2];		\
+	k_termio.c_cc[_VSWTC]  = k_termios->c_cc[VSWTC];		\
+									\
+	copy_to_user(u_termio, &k_termio, sizeof(k_termio));		\
+})
+
+#define user_termios_to_kernel_termios(k, u) \
+	copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios(u, k) \
+	copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _ALPHA_TERMIOS_H */
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
new file mode 100644
index 0000000..d51491e
--- /dev/null
+++ b/include/asm-alpha/thread_info.h
@@ -0,0 +1,98 @@
+#ifndef _ALPHA_THREAD_INFO_H
+#define _ALPHA_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/types.h>
+#include <asm/hwrpb.h>
+#endif
+
+#ifndef __ASSEMBLY__
+struct thread_info {
+	struct pcb_struct	pcb;		/* palcode state */
+
+	struct task_struct	*task;		/* main task structure */
+	unsigned int		flags;		/* low level flags */
+	unsigned int		ieee_state;	/* see fpu.h */
+
+	struct exec_domain	*exec_domain;	/* execution domain */
+	mm_segment_t		addr_limit;	/* thread address space */
+	unsigned		cpu;		/* current CPU */
+	int			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+	int bpt_nsaved;
+	unsigned long bpt_addr[2];		/* breakpoint handling  */
+	unsigned int bpt_insn[2];
+
+	struct restart_block	restart_block;
+};
+
+/*
+ * Macros/functions for gaining access to the thread information structure.
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+/* How to get the thread information struct from C.  */
+register struct thread_info *__current_thread_info __asm__("$8");
+#define current_thread_info()  __current_thread_info
+
+/* Thread information allocation.  */
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define alloc_thread_info(tsk) \
+  ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE		0x40000000
+
+/*
+ * Thread information flags:
+ * - these are process state flags and are used from assembly
+ * - pending work-to-be-done flags come first to fit in an immediate operand.
+ *
+ * TIF_SYSCALL_TRACE is known to be 0 via blbs.
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_POLLING_NRFLAG	4	/* poll_idle is polling NEED_RESCHED */
+#define TIF_DIE_IF_KERNEL	5	/* dik recursion lock */
+#define TIF_UAC_NOPRINT		6	/* see sysinfo.h */
+#define TIF_UAC_NOFIX		7
+#define TIF_UAC_SIGBUS		8
+#define TIF_MEMDIE		9
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+/* Work to do on interrupt/exception return.  */
+#define _TIF_WORK_MASK		(_TIF_NOTIFY_RESUME	\
+				 | _TIF_SIGPENDING	\
+				 | _TIF_NEED_RESCHED)
+
+/* Work to do on any return to userspace.  */
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
+				 | _TIF_SYSCALL_TRACE)
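+
+#ifndef __ASSEMBLY__
+/* Illustrative sketch (not part of the original header): pairing the
+   _TIF_* masks above with current_thread_info().  Hypothetical helper
+   testing for pending return-to-user work.  */
+static inline int example_work_pending(void)
+{
+	return current_thread_info()->flags & _TIF_WORK_MASK;
+}
+#endif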
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_THREAD_INFO_H */
diff --git a/include/asm-alpha/timex.h b/include/asm-alpha/timex.h
new file mode 100644
index 0000000..afa0c45
--- /dev/null
+++ b/include/asm-alpha/timex.h
@@ -0,0 +1,31 @@
+/*
+ * linux/include/asm-alpha/timex.h
+ *
+ * ALPHA architecture timex specifications
+ */
+#ifndef _ASMALPHA_TIMEX_H
+#define _ASMALPHA_TIMEX_H
+
+/* With only one or two oddballs, we use the RTC as the ticker, selecting
+   the 32.768kHz reference clock, which nicely divides down to our HZ.  */
+#define CLOCK_TICK_RATE	32768
+
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP for scheduling.
+ *
+ * Only the low 32 bits are available as a continuously counting entity. 
+ * But this only means we'll force a reschedule every 8 seconds or so,
+ * which isn't an evil thing.
+ */
+
+typedef unsigned int cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+	cycles_t ret;
+	__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+	return ret;
+}
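+
+/* Illustrative sketch (not part of the original header): measuring an
+   interval with the 32-bit cycle counter.  Unsigned subtraction keeps
+   the delta correct across a single wraparound (roughly every 8
+   seconds, as noted above).  The helper name is hypothetical.  */
+static inline cycles_t example_cycles_since(cycles_t start)
+{
+	return get_cycles() - start;	/* well-defined modulo 2^32 */
+}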
+
+#endif
diff --git a/include/asm-alpha/tlb.h b/include/asm-alpha/tlb.h
new file mode 100644
index 0000000..aa91335
--- /dev/null
+++ b/include/asm-alpha/tlb.h
@@ -0,0 +1,15 @@
+#ifndef _ALPHA_TLB_H
+#define _ALPHA_TLB_H
+
+#define tlb_start_vma(tlb, vma)			do { } while (0)
+#define tlb_end_vma(tlb, vma)			do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, addr)	do { } while (0)
+
+#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb,pte)			pte_free(pte)
+#define __pmd_free_tlb(tlb,pmd)			pmd_free(pmd)
+ 
+#endif
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
new file mode 100644
index 0000000..9d484c1
--- /dev/null
+++ b/include/asm-alpha/tlbflush.h
@@ -0,0 +1,158 @@
+#ifndef _ALPHA_TLBFLUSH_H
+#define _ALPHA_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/compiler.h>
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+extern void __load_new_mm_context(struct mm_struct *);
+
+
+/* Use a few helper functions to hide the ugly broken ASN
+   numbers on early Alphas (ev4 and ev45).  */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current(struct mm_struct *mm)
+{
+	__load_new_mm_context(mm);
+	tbiap();
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current(struct mm_struct *mm)
+{
+	__load_new_mm_context(mm);
+}
+
+/* Flush just one page in the current TLB set.  We need to be very
+   careful about the icache here; there is no way to invalidate a
+   specific icache page.  */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current_page(struct mm_struct * mm,
+			   struct vm_area_struct *vma,
+			   unsigned long addr)
+{
+	int tbi_flag = 2;
+	if (vma->vm_flags & VM_EXEC) {
+		__load_new_mm_context(mm);
+		tbi_flag = 3;
+	}
+	tbi(tbi_flag, addr);
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current_page(struct mm_struct * mm,
+			   struct vm_area_struct *vma,
+			   unsigned long addr)
+{
+	if (vma->vm_flags & VM_EXEC)
+		__load_new_mm_context(mm);
+	else
+		tbi(2, addr);
+}
+
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
+# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
+#else
+# ifdef CONFIG_ALPHA_EV4
+#  define flush_tlb_current		ev4_flush_tlb_current
+#  define flush_tlb_current_page	ev4_flush_tlb_current_page
+# else
+#  define flush_tlb_current		ev5_flush_tlb_current
+#  define flush_tlb_current_page	ev5_flush_tlb_current_page
+# endif
+#endif
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
+/* Flush current user mapping.  */
+static inline void
+flush_tlb(void)
+{
+	flush_tlb_current(current->active_mm);
+}
+
+/* Flush someone else's user mapping.  */
+static inline void
+flush_tlb_other(struct mm_struct *mm)
+{
+	unsigned long *mmc = &mm->context[smp_processor_id()];
+	/* Check it's not zero first to avoid cacheline ping pong
+	   when possible.  */
+	if (*mmc) *mmc = 0;
+}
+
+/* Flush a specified range of user mapping page tables from TLB.
+   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
+   not have fine-grained tlb flushing, so the VPTE stuff will be
+   flushed during the next flush_tlb_range.  */
+
+static inline void
+flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+		   unsigned long end)
+{
+}
+
+#ifndef CONFIG_SMP
+/* Flush everything (kernel mapping may also have changed
+   due to vmalloc/vfree).  */
+static inline void flush_tlb_all(void)
+{
+	tbia();
+}
+
+/* Flush a specified user mapping.  */
+static inline void
+flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mm == current->active_mm)
+		flush_tlb_current(mm);
+	else
+		flush_tlb_other(mm);
+}
+
+/* Page-granular tlb flush.  */
+static inline void
+flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm == current->active_mm)
+		flush_tlb_current_page(mm, vma, addr);
+	else
+		flush_tlb_other(mm);
+}
+
+/* Flush a specified range of user mapping.  On the Alpha we flush
+   the whole user tlb.  */
+static inline void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
+			    unsigned long);
+
+#endif /* CONFIG_SMP */
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+#endif /* _ALPHA_TLBFLUSH_H */
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
new file mode 100644
index 0000000..eb740e2
--- /dev/null
+++ b/include/asm-alpha/topology.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_ALPHA_TOPOLOGY_H
+#define _ASM_ALPHA_TOPOLOGY_H
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <asm/machvec.h>
+
+#ifdef CONFIG_NUMA
+static inline int cpu_to_node(int cpu)
+{
+	int node;
+	
+	if (!alpha_mv.cpuid_to_nid)
+		return 0;
+
+	node = alpha_mv.cpuid_to_nid(cpu);
+
+#ifdef DEBUG_NUMA
+	BUG_ON(node < 0);
+#endif
+
+	return node;
+}
+
+static inline cpumask_t node_to_cpumask(int node)
+{
+	cpumask_t node_cpu_mask = CPU_MASK_NONE;
+	int cpu;
+
+	for(cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+			cpu_set(cpu, node_cpu_mask);
+	}
+
+#ifdef DEBUG_NUMA
+	printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
+#endif
+
+	return node_cpu_mask;
+}
+
+#define pcibus_to_cpumask(bus)	(cpu_online_map)
+
+#else /* !CONFIG_NUMA */
+# include <asm-generic/topology.h>
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/include/asm-alpha/types.h b/include/asm-alpha/types.h
new file mode 100644
index 0000000..43264d2
--- /dev/null
+++ b/include/asm-alpha/types.h
@@ -0,0 +1,63 @@
+#ifndef _ALPHA_TYPES_H
+#define _ALPHA_TYPES_H
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue.  However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ */
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned int umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 64
+
+#ifndef __ASSEMBLY__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long s64;
+typedef unsigned long u64;
+
+typedef u64 dma_addr_t;
+typedef u64 dma64_addr_t;
+
+typedef unsigned short kmem_bufctl_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_TYPES_H */
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
new file mode 100644
index 0000000..4c39ee7
--- /dev/null
+++ b/include/asm-alpha/uaccess.h
@@ -0,0 +1,517 @@
+#ifndef __ALPHA_UACCESS_H
+#define __ALPHA_UACCESS_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * Or at least it did once upon a time.  Nowadays it is a mask that
+ * defines which bits of the address space are off limits.  This is a
+ * wee bit faster than the above.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS	((mm_segment_t) { 0UL })
+#define USER_DS		((mm_segment_t) { -0x40000000000UL })
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+#define get_fs()  (current_thread_info()->addr_limit)
+#define get_ds()  (KERNEL_DS)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a,b)	((a).seg == (b).seg)
+
+/*
+ * Is an address valid?  This does a straightforward calculation rather
+ * than tests.
+ *
+ * Address valid if:
+ *  - "addr" doesn't have any high-bits set
+ *  - AND "size" doesn't have any high-bits set
+ *  - AND "addr+size" doesn't have any high-bits set
+ *  - OR we are in kernel mode.
+ */
+#define __access_ok(addr,size,segment) \
+	(((segment).seg & (addr | size | (addr+size))) == 0)
+
+#define access_ok(type,addr,size)				\
+({								\
+	__chk_user_ptr(addr);					\
+	__access_ok(((unsigned long)(addr)),(size),get_fs());	\
+})
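+
+/* Illustrative note (not part of the original header): with USER_DS the
+   seg mask is -0x40000000000, i.e. all bits at and above bit 42 set.
+   Any addr, size, or addr+size reaching outside the 4TB user region
+   has one of those bits set, so the AND is nonzero and the check
+   fails.  For example, __access_ok(0x10000, 8, USER_DS) succeeds while
+   __access_ok(0xfffffc0000000000UL, 8, USER_DS) does not; with
+   KERNEL_DS the mask is 0 and everything passes.  */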
+
+/* this function will go away soon - use access_ok() instead */
+extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
+{
+	return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * As the alpha uses the same address space for kernel and user
+ * data, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * Be careful not to
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr) \
+  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
+#define get_user(x,ptr) \
+  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr) \
+  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) \
+  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+  
+/*
+ * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
+ * encode the bits we need for resolving the exception.  See the
+ * more extensive comments with fixup_inline_exception below for
+ * more information.
+ */
+
+extern void __get_user_unknown(void);
+
+#define __get_user_nocheck(x,ptr,size)				\
+({								\
+	long __gu_err = 0;					\
+	unsigned long __gu_val;					\
+	__chk_user_ptr(ptr);					\
+	switch (size) {						\
+	  case 1: __get_user_8(ptr); break;			\
+	  case 2: __get_user_16(ptr); break;			\
+	  case 4: __get_user_32(ptr); break;			\
+	  case 8: __get_user_64(ptr); break;			\
+	  default: __get_user_unknown(); break;			\
+	}							\
+	(x) = (__typeof__(*(ptr))) __gu_val;			\
+	__gu_err;						\
+})
+
+#define __get_user_check(x,ptr,size,segment)				\
+({									\
+	long __gu_err = -EFAULT;					\
+	unsigned long __gu_val = 0;					\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	if (__access_ok((unsigned long)__gu_addr,size,segment)) {	\
+		__gu_err = 0;						\
+		switch (size) {						\
+		  case 1: __get_user_8(__gu_addr); break;		\
+		  case 2: __get_user_16(__gu_addr); break;		\
+		  case 4: __get_user_32(__gu_addr); break;		\
+		  case 8: __get_user_64(__gu_addr); break;		\
+		  default: __get_user_unknown(); break;			\
+		}							\
+	}								\
+	(x) = (__typeof__(*(ptr))) __gu_val;				\
+	__gu_err;							\
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_64(addr)				\
+	__asm__("1: ldq %0,%2\n"			\
+	"2:\n"						\
+	".section __ex_table,\"a\"\n"			\
+	"	.long 1b - .\n"				\
+	"	lda %0, 2b-1b(%1)\n"			\
+	".previous"					\
+		: "=r"(__gu_val), "=r"(__gu_err)	\
+		: "m"(__m(addr)), "1"(__gu_err))
+
+#define __get_user_32(addr)				\
+	__asm__("1: ldl %0,%2\n"			\
+	"2:\n"						\
+	".section __ex_table,\"a\"\n"			\
+	"	.long 1b - .\n"				\
+	"	lda %0, 2b-1b(%1)\n"			\
+	".previous"					\
+		: "=r"(__gu_val), "=r"(__gu_err)	\
+		: "m"(__m(addr)), "1"(__gu_err))
+
+#ifdef __alpha_bwx__
+/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
+
+#define __get_user_16(addr)				\
+	__asm__("1: ldwu %0,%2\n"			\
+	"2:\n"						\
+	".section __ex_table,\"a\"\n"			\
+	"	.long 1b - .\n"				\
+	"	lda %0, 2b-1b(%1)\n"			\
+	".previous"					\
+		: "=r"(__gu_val), "=r"(__gu_err)	\
+		: "m"(__m(addr)), "1"(__gu_err))
+
+#define __get_user_8(addr)				\
+	__asm__("1: ldbu %0,%2\n"			\
+	"2:\n"						\
+	".section __ex_table,\"a\"\n"			\
+	"	.long 1b - .\n"				\
+	"	lda %0, 2b-1b(%1)\n"			\
+	".previous"					\
+		: "=r"(__gu_val), "=r"(__gu_err)	\
+		: "m"(__m(addr)), "1"(__gu_err))
+#else
+/* Unfortunately, we can't get an unaligned access trap for the sub-word
+   load, so we have to do a general unaligned operation.  */
+
+#define __get_user_16(addr)						\
+{									\
+	long __gu_tmp;							\
+	__asm__("1: ldq_u %0,0(%3)\n"					\
+	"2:	ldq_u %1,1(%3)\n"					\
+	"	extwl %0,%3,%0\n"					\
+	"	extwh %1,%3,%1\n"					\
+	"	or %0,%1,%0\n"						\
+	"3:\n"								\
+	".section __ex_table,\"a\"\n"					\
+	"	.long 1b - .\n"						\
+	"	lda %0, 3b-1b(%2)\n"					\
+	"	.long 2b - .\n"						\
+	"	lda %0, 3b-2b(%2)\n"					\
+	".previous"							\
+		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
+		: "r"(addr), "2"(__gu_err));				\
+}
+
+#define __get_user_8(addr)						\
+	__asm__("1: ldq_u %0,0(%2)\n"					\
+	"	extbl %0,%2,%0\n"					\
+	"2:\n"								\
+	".section __ex_table,\"a\"\n"					\
+	"	.long 1b - .\n"						\
+	"	lda %0, 2b-1b(%1)\n"					\
+	".previous"							\
+		: "=&r"(__gu_val), "=r"(__gu_err)			\
+		: "r"(addr), "1"(__gu_err))
+#endif
+
+extern void __put_user_unknown(void);
+
+#define __put_user_nocheck(x,ptr,size)				\
+({								\
+	long __pu_err = 0;					\
+	__chk_user_ptr(ptr);					\
+	switch (size) {						\
+	  case 1: __put_user_8(x,ptr); break;			\
+	  case 2: __put_user_16(x,ptr); break;			\
+	  case 4: __put_user_32(x,ptr); break;			\
+	  case 8: __put_user_64(x,ptr); break;			\
+	  default: __put_user_unknown(); break;			\
+	}							\
+	__pu_err;						\
+})
+
+#define __put_user_check(x,ptr,size,segment)				\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
+	if (__access_ok((unsigned long)__pu_addr,size,segment)) {	\
+		__pu_err = 0;						\
+		switch (size) {						\
+		  case 1: __put_user_8(x,__pu_addr); break;		\
+		  case 2: __put_user_16(x,__pu_addr); break;		\
+		  case 4: __put_user_32(x,__pu_addr); break;		\
+		  case 8: __put_user_64(x,__pu_addr); break;		\
+		  default: __put_user_unknown(); break;			\
+		}							\
+	}								\
+	__pu_err;							\
+})
+
+/*
+ * The "__put_user_xx()" macros tell gcc they read from memory
+ * instead of writing: this is because they do not write to
+ * any memory gcc knows about, so there are no aliasing issues
+ */
+#define __put_user_64(x,addr)					\
+__asm__ __volatile__("1: stq %r2,%1\n"				\
+	"2:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31,2b-1b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err)				\
+		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
+
+#define __put_user_32(x,addr)					\
+__asm__ __volatile__("1: stl %r2,%1\n"				\
+	"2:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31,2b-1b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err)				\
+		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
+
+#ifdef __alpha_bwx__
+/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */
+
+#define __put_user_16(x,addr)					\
+__asm__ __volatile__("1: stw %r2,%1\n"				\
+	"2:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31,2b-1b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err)				\
+		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
+
+#define __put_user_8(x,addr)					\
+__asm__ __volatile__("1: stb %r2,%1\n"				\
+	"2:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31,2b-1b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err)				\
+		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
+#else
+/* Unfortunately, we can't get an unaligned access trap for the sub-word
+   write, so we have to do a general unaligned operation.  */
+
+#define __put_user_16(x,addr)					\
+{								\
+	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
+	__asm__ __volatile__(					\
+	"1:	ldq_u %2,1(%5)\n"				\
+	"2:	ldq_u %1,0(%5)\n"				\
+	"	inswh %6,%5,%4\n"				\
+	"	inswl %6,%5,%3\n"				\
+	"	mskwh %2,%5,%2\n"				\
+	"	mskwl %1,%5,%1\n"				\
+	"	or %2,%4,%2\n"					\
+	"	or %1,%3,%1\n"					\
+	"3:	stq_u %2,1(%5)\n"				\
+	"4:	stq_u %1,0(%5)\n"				\
+	"5:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31, 5b-1b(%0)\n"				\
+	"	.long 2b - .\n"					\
+	"	lda $31, 5b-2b(%0)\n"				\
+	"	.long 3b - .\n"					\
+	"	lda $31, 5b-3b(%0)\n"				\
+	"	.long 4b - .\n"					\
+	"	lda $31, 5b-4b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
+		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
+		  "=&r"(__pu_tmp4)				\
+		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
+}
+
+#define __put_user_8(x,addr)					\
+{								\
+	long __pu_tmp1, __pu_tmp2;				\
+	__asm__ __volatile__(					\
+	"1:	ldq_u %1,0(%4)\n"				\
+	"	insbl %3,%4,%2\n"				\
+	"	mskbl %1,%4,%1\n"				\
+	"	or %1,%2,%1\n"					\
+	"2:	stq_u %1,0(%4)\n"				\
+	"3:\n"							\
+	".section __ex_table,\"a\"\n"				\
+	"	.long 1b - .\n"					\
+	"	lda $31, 3b-1b(%0)\n"				\
+	"	.long 2b - .\n"					\
+	"	lda $31, 3b-2b(%0)\n"				\
+	".previous"						\
+		: "=r"(__pu_err),				\
+	  	  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
+		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
+}
+#endif
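+
+/* Illustrative sketch (not part of the original header): combining the
+   checked and unchecked accessors.  A hypothetical helper reads a user
+   counter and writes it back; the second access reuses the address
+   check already performed by get_user(), so __put_user() suffices.  */
+static inline long example_bump_user(int __user *p)
+{
+	int v;
+
+	if (get_user(v, p))		/* validates p, may fault */
+		return -EFAULT;
+	return __put_user(v + 1, p);	/* same address, skip the recheck */
+}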
+
+
+/*
+ * Complex access routines
+ */
+
+/* This little bit of silliness is to get the GP loaded for a function
+   that ordinarily wouldn't.  Otherwise we could have it done by the macro
+   directly, which can be optimized by the linker.  */
+#ifdef MODULE
+#define __module_address(sym)		"r"(sym),
+#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
+#else
+#define __module_address(sym)
+#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
+#endif
+
+extern void __copy_user(void);
+
+extern inline long
+__copy_tofrom_user_nocheck(void *to, const void *from, long len)
+{
+	register void * __cu_to __asm__("$6") = to;
+	register const void * __cu_from __asm__("$7") = from;
+	register long __cu_len __asm__("$0") = len;
+
+	__asm__ __volatile__(
+		__module_call(28, 3, __copy_user)
+		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
+		: __module_address(__copy_user)
+		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
+		: "$1","$2","$3","$4","$5","$28","memory");
+
+	return __cu_len;
+}
+
+extern inline long
+__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
+{
+	if (__access_ok((unsigned long)validate, len, get_fs()))
+		len = __copy_tofrom_user_nocheck(to, from, len);
+	return len;
+}
+
+#define __copy_to_user(to,from,n)					\
+({									\
+	__chk_user_ptr(to);						\
+	__copy_tofrom_user_nocheck((__force void *)(to),(from),(n));	\
+})
+#define __copy_from_user(to,from,n)					\
+({									\
+	__chk_user_ptr(from);						\
+	__copy_tofrom_user_nocheck((to),(__force void *)(from),(n));	\
+})
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+
+extern inline long
+copy_to_user(void __user *to, const void *from, long n)
+{
+	return __copy_tofrom_user((__force void *)to, from, n, to);
+}
+
+extern inline long
+copy_from_user(void *to, const void __user *from, long n)
+{
+	return __copy_tofrom_user(to, (__force void *)from, n, from);
+}
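+
+/* Illustrative sketch (not part of the original header): the usual
+   pattern around copy_from_user()/copy_to_user(), whose return value
+   is the number of bytes that could NOT be copied (0 on success).
+   The structure and function names are hypothetical.  */
+struct example_req { unsigned long arg; };
+
+static inline long example_handle_req(void __user *uptr)
+{
+	struct example_req req;
+
+	if (copy_from_user(&req, uptr, sizeof(req)))
+		return -EFAULT;		/* some bytes were left uncopied */
+	req.arg++;
+	if (copy_to_user(uptr, &req, sizeof(req)))
+		return -EFAULT;
+	return 0;
+}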
+
+extern void __do_clear_user(void);
+
+extern inline long
+__clear_user(void __user *to, long len)
+{
+	register void __user * __cl_to __asm__("$6") = to;
+	register long __cl_len __asm__("$0") = len;
+	__asm__ __volatile__(
+		__module_call(28, 2, __do_clear_user)
+		: "=r"(__cl_len), "=r"(__cl_to)
+		: __module_address(__do_clear_user)
+		  "0"(__cl_len), "1"(__cl_to)
+		: "$1","$2","$3","$4","$5","$28","memory");
+	return __cl_len;
+}
+
+extern inline long
+clear_user(void __user *to, long len)
+{
+	if (__access_ok((unsigned long)to, len, get_fs()))
+		len = __clear_user(to, len);
+	return len;
+}
+
+#undef __module_address
+#undef __module_call
+
+/* Returns: -EFAULT if exception before terminator, N if the entire
+   buffer filled, else strlen.  */
+
+extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
+
+extern inline long
+strncpy_from_user(char *to, const char __user *from, long n)
+{
+	long ret = -EFAULT;
+	if (__access_ok((unsigned long)from, 0, get_fs()))
+		ret = __strncpy_from_user(to, from, n);
+	return ret;
+}
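+
+/* Illustrative sketch (not part of the original header): honouring the
+   three-way return convention described above.  Names are
+   hypothetical.  */
+static inline long example_fetch_name(char *kbuf, const char __user *uname,
+				      long bufsz)
+{
+	long len = strncpy_from_user(kbuf, uname, bufsz);
+
+	if (len < 0)
+		return len;		/* faulted before the terminator */
+	if (len == bufsz)
+		return -ENAMETOOLONG;	/* buffer filled, no NUL seen */
+	return len;			/* plain strlen() of the copy */
+}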
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern long __strlen_user(const char __user *);
+
+extern inline long strlen_user(const char __user *str)
+{
+	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
+}
+
+/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
+ * a value greater than N if the limit would be exceeded, else strlen.  */
+extern long __strnlen_user(const char __user *, long);
+
+extern inline long strnlen_user(const char __user *str, long n)
+{
+	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
+}
+
+/*
+ * About the exception table:
+ *
+ * - insn is a 32-bit pc-relative offset from the faulting insn.
+ * - nextinsn is a 16-bit offset off of the faulting instruction
+ *   (not off of the *next* instruction as branches are).
+ * - errreg is the register in which to place -EFAULT.
+ * - valreg is the final target register for the load sequence
+ *   and will be zeroed.
+ *
+ * Either errreg or valreg may be $31, in which case nothing happens.
+ *
+ * The exception fixup information "just so happens" to be arranged
+ * as in a MEM format instruction.  This lets us emit our three
+ * values like so:
+ *
+ *      lda valreg, nextinsn(errreg)
+ *
+ */
+
+struct exception_table_entry
+{
+	signed int insn;
+	union exception_fixup {
+		unsigned unit;
+		struct {
+			signed int nextinsn : 16;
+			unsigned int errreg : 5;
+			unsigned int valreg : 5;
+		} bits;
+	} fixup;
+};
+
+/* Returns the new pc */
+#define fixup_exception(map_reg, fixup, pc)			\
+({								\
+	if ((fixup)->fixup.bits.valreg != 31)			\
+		map_reg((fixup)->fixup.bits.valreg) = 0;	\
+	if ((fixup)->fixup.bits.errreg != 31)			\
+		map_reg((fixup)->fixup.bits.errreg) = -EFAULT;	\
+	(pc) + (fixup)->fixup.bits.nextinsn;			\
+})
+
+
+#endif /* __ALPHA_UACCESS_H */
diff --git a/include/asm-alpha/ucontext.h b/include/asm-alpha/ucontext.h
new file mode 100644
index 0000000..47578ab
--- /dev/null
+++ b/include/asm-alpha/ucontext.h
@@ -0,0 +1,13 @@
+#ifndef _ASMAXP_UCONTEXT_H
+#define _ASMAXP_UCONTEXT_H
+
+struct ucontext {
+	unsigned long	  uc_flags;
+	struct ucontext  *uc_link;
+	old_sigset_t	  uc_osf_sigmask;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* !_ASMAXP_UCONTEXT_H */
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
new file mode 100644
index 0000000..a1d7284
--- /dev/null
+++ b/include/asm-alpha/unaligned.h
@@ -0,0 +1,6 @@
+#ifndef __ALPHA_UNALIGNED_H
+#define __ALPHA_UNALIGNED_H
+
+#include <asm-generic/unaligned.h>
+
+#endif
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
new file mode 100644
index 0000000..c4e70e8
--- /dev/null
+++ b/include/asm-alpha/unistd.h
@@ -0,0 +1,656 @@
+#ifndef _ALPHA_UNISTD_H
+#define _ALPHA_UNISTD_H
+
+#define __NR_osf_syscall	  0	/* not implemented */
+#define __NR_exit		  1
+#define __NR_fork		  2
+#define __NR_read		  3
+#define __NR_write		  4
+#define __NR_osf_old_open	  5	/* not implemented */
+#define __NR_close		  6
+#define __NR_osf_wait4		  7
+#define __NR_osf_old_creat	  8	/* not implemented */
+#define __NR_link		  9
+#define __NR_unlink		 10
+#define __NR_osf_execve		 11	/* not implemented */
+#define __NR_chdir		 12
+#define __NR_fchdir		 13
+#define __NR_mknod		 14
+#define __NR_chmod		 15
+#define __NR_chown		 16
+#define __NR_brk		 17
+#define __NR_osf_getfsstat	 18	/* not implemented */
+#define __NR_lseek		 19
+#define __NR_getxpid		 20
+#define __NR_osf_mount		 21
+#define __NR_umount		 22
+#define __NR_setuid		 23
+#define __NR_getxuid		 24
+#define __NR_exec_with_loader	 25	/* not implemented */
+#define __NR_ptrace		 26
+#define __NR_osf_nrecvmsg	 27	/* not implemented */
+#define __NR_osf_nsendmsg	 28	/* not implemented */
+#define __NR_osf_nrecvfrom	 29	/* not implemented */
+#define __NR_osf_naccept	 30	/* not implemented */
+#define __NR_osf_ngetpeername	 31	/* not implemented */
+#define __NR_osf_ngetsockname	 32	/* not implemented */
+#define __NR_access		 33
+#define __NR_osf_chflags	 34	/* not implemented */
+#define __NR_osf_fchflags	 35	/* not implemented */
+#define __NR_sync		 36
+#define __NR_kill		 37
+#define __NR_osf_old_stat	 38	/* not implemented */
+#define __NR_setpgid		 39
+#define __NR_osf_old_lstat	 40	/* not implemented */
+#define __NR_dup		 41
+#define __NR_pipe		 42
+#define __NR_osf_set_program_attributes	43
+#define __NR_osf_profil		 44	/* not implemented */
+#define __NR_open		 45
+#define __NR_osf_old_sigaction	 46	/* not implemented */
+#define __NR_getxgid		 47
+#define __NR_osf_sigprocmask	 48
+#define __NR_osf_getlogin	 49	/* not implemented */
+#define __NR_osf_setlogin	 50	/* not implemented */
+#define __NR_acct		 51
+#define __NR_sigpending		 52
+
+#define __NR_ioctl		 54
+#define __NR_osf_reboot		 55	/* not implemented */
+#define __NR_osf_revoke		 56	/* not implemented */
+#define __NR_symlink		 57
+#define __NR_readlink		 58
+#define __NR_execve		 59
+#define __NR_umask		 60
+#define __NR_chroot		 61
+#define __NR_osf_old_fstat	 62	/* not implemented */
+#define __NR_getpgrp		 63
+#define __NR_getpagesize	 64
+#define __NR_osf_mremap		 65	/* not implemented */
+#define __NR_vfork		 66
+#define __NR_stat		 67
+#define __NR_lstat		 68
+#define __NR_osf_sbrk		 69	/* not implemented */
+#define __NR_osf_sstk		 70	/* not implemented */
+#define __NR_mmap		 71	/* OSF/1 mmap is superset of Linux */
+#define __NR_osf_old_vadvise	 72	/* not implemented */
+#define __NR_munmap		 73
+#define __NR_mprotect		 74
+#define __NR_madvise		 75
+#define __NR_vhangup		 76
+#define __NR_osf_kmodcall	 77	/* not implemented */
+#define __NR_osf_mincore	 78	/* not implemented */
+#define __NR_getgroups		 79
+#define __NR_setgroups		 80
+#define __NR_osf_old_getpgrp	 81	/* not implemented */
+#define __NR_setpgrp		 82	/* BSD alias for setpgid */
+#define __NR_osf_setitimer	 83
+#define __NR_osf_old_wait	 84	/* not implemented */
+#define __NR_osf_table		 85	/* not implemented */
+#define __NR_osf_getitimer	 86
+#define __NR_gethostname	 87
+#define __NR_sethostname	 88
+#define __NR_getdtablesize	 89
+#define __NR_dup2		 90
+#define __NR_fstat		 91
+#define __NR_fcntl		 92
+#define __NR_osf_select		 93
+#define __NR_poll		 94
+#define __NR_fsync		 95
+#define __NR_setpriority	 96
+#define __NR_socket		 97
+#define __NR_connect		 98
+#define __NR_accept		 99
+#define __NR_getpriority	100
+#define __NR_send		101
+#define __NR_recv		102
+#define __NR_sigreturn		103
+#define __NR_bind		104
+#define __NR_setsockopt		105
+#define __NR_listen		106
+#define __NR_osf_plock		107	/* not implemented */
+#define __NR_osf_old_sigvec	108	/* not implemented */
+#define __NR_osf_old_sigblock	109	/* not implemented */
+#define __NR_osf_old_sigsetmask	110	/* not implemented */
+#define __NR_sigsuspend		111
+#define __NR_osf_sigstack	112
+#define __NR_recvmsg		113
+#define __NR_sendmsg		114
+#define __NR_osf_old_vtrace	115	/* not implemented */
+#define __NR_osf_gettimeofday	116
+#define __NR_osf_getrusage	117
+#define __NR_getsockopt		118
+
+#define __NR_readv		120
+#define __NR_writev		121
+#define __NR_osf_settimeofday	122
+#define __NR_fchown		123
+#define __NR_fchmod		124
+#define __NR_recvfrom		125
+#define __NR_setreuid		126
+#define __NR_setregid		127
+#define __NR_rename		128
+#define __NR_truncate		129
+#define __NR_ftruncate		130
+#define __NR_flock		131
+#define __NR_setgid		132
+#define __NR_sendto		133
+#define __NR_shutdown		134
+#define __NR_socketpair		135
+#define __NR_mkdir		136
+#define __NR_rmdir		137
+#define __NR_osf_utimes		138
+#define __NR_osf_old_sigreturn	139	/* not implemented */
+#define __NR_osf_adjtime	140	/* not implemented */
+#define __NR_getpeername	141
+#define __NR_osf_gethostid	142	/* not implemented */
+#define __NR_osf_sethostid	143	/* not implemented */
+#define __NR_getrlimit		144
+#define __NR_setrlimit		145
+#define __NR_osf_old_killpg	146	/* not implemented */
+#define __NR_setsid		147
+#define __NR_quotactl		148
+#define __NR_osf_oldquota	149	/* not implemented */
+#define __NR_getsockname	150
+
+#define __NR_osf_pid_block	153	/* not implemented */
+#define __NR_osf_pid_unblock	154	/* not implemented */
+
+#define __NR_sigaction		156
+#define __NR_osf_sigwaitprim	157	/* not implemented */
+#define __NR_osf_nfssvc		158	/* not implemented */
+#define __NR_osf_getdirentries	159
+#define __NR_osf_statfs		160
+#define __NR_osf_fstatfs	161
+
+#define __NR_osf_asynch_daemon	163	/* not implemented */
+#define __NR_osf_getfh		164	/* not implemented */	
+#define __NR_osf_getdomainname	165
+#define __NR_setdomainname	166
+
+#define __NR_osf_exportfs	169	/* not implemented */
+
+#define __NR_osf_alt_plock	181	/* not implemented */
+
+#define __NR_osf_getmnt		184	/* not implemented */
+
+#define __NR_osf_alt_sigpending	187	/* not implemented */
+#define __NR_osf_alt_setsid	188	/* not implemented */
+
+#define __NR_osf_swapon		199
+#define __NR_msgctl		200
+#define __NR_msgget		201
+#define __NR_msgrcv		202
+#define __NR_msgsnd		203
+#define __NR_semctl		204
+#define __NR_semget		205
+#define __NR_semop		206
+#define __NR_osf_utsname	207
+#define __NR_lchown		208
+#define __NR_osf_shmat		209
+#define __NR_shmctl		210
+#define __NR_shmdt		211
+#define __NR_shmget		212
+#define __NR_osf_mvalid		213	/* not implemented */
+#define __NR_osf_getaddressconf	214	/* not implemented */
+#define __NR_osf_msleep		215	/* not implemented */
+#define __NR_osf_mwakeup	216	/* not implemented */
+#define __NR_msync		217
+#define __NR_osf_signal		218	/* not implemented */
+#define __NR_osf_utc_gettime	219	/* not implemented */
+#define __NR_osf_utc_adjtime	220	/* not implemented */
+
+#define __NR_osf_security	222	/* not implemented */
+#define __NR_osf_kloadcall	223	/* not implemented */
+
+#define __NR_getpgid		233
+#define __NR_getsid		234
+#define __NR_sigaltstack	235
+#define __NR_osf_waitid		236	/* not implemented */
+#define __NR_osf_priocntlset	237	/* not implemented */
+#define __NR_osf_sigsendset	238	/* not implemented */
+#define __NR_osf_set_speculative	239	/* not implemented */
+#define __NR_osf_msfs_syscall	240	/* not implemented */
+#define __NR_osf_sysinfo	241
+#define __NR_osf_uadmin		242	/* not implemented */
+#define __NR_osf_fuser		243	/* not implemented */
+#define __NR_osf_proplist_syscall    244
+#define __NR_osf_ntp_adjtime	245	/* not implemented */
+#define __NR_osf_ntp_gettime	246	/* not implemented */
+#define __NR_osf_pathconf	247	/* not implemented */
+#define __NR_osf_fpathconf	248	/* not implemented */
+
+#define __NR_osf_uswitch	250	/* not implemented */
+#define __NR_osf_usleep_thread	251
+#define __NR_osf_audcntl	252	/* not implemented */
+#define __NR_osf_audgen		253	/* not implemented */
+#define __NR_sysfs		254
+#define __NR_osf_subsys_info	255	/* not implemented */
+#define __NR_osf_getsysinfo	256
+#define __NR_osf_setsysinfo	257
+#define __NR_osf_afs_syscall	258	/* not implemented */
+#define __NR_osf_swapctl	259	/* not implemented */
+#define __NR_osf_memcntl	260	/* not implemented */
+#define __NR_osf_fdatasync	261	/* not implemented */
+
+
+/*
+ * Linux-specific system calls begin at 300
+ */
+#define __NR_bdflush		300
+#define __NR_sethae		301
+#define __NR_mount		302
+#define __NR_old_adjtimex	303
+#define __NR_swapoff		304
+#define __NR_getdents		305
+#define __NR_create_module	306
+#define __NR_init_module	307
+#define __NR_delete_module	308
+#define __NR_get_kernel_syms	309
+#define __NR_syslog		310
+#define __NR_reboot		311
+#define __NR_clone		312
+#define __NR_uselib		313
+#define __NR_mlock		314
+#define __NR_munlock		315
+#define __NR_mlockall		316
+#define __NR_munlockall		317
+#define __NR_sysinfo		318
+#define __NR__sysctl		319
+/* 320 was sys_idle.  */
+#define __NR_oldumount		321
+#define __NR_swapon		322
+#define __NR_times		323
+#define __NR_personality	324
+#define __NR_setfsuid		325
+#define __NR_setfsgid		326
+#define __NR_ustat		327
+#define __NR_statfs		328
+#define __NR_fstatfs		329
+#define __NR_sched_setparam		330
+#define __NR_sched_getparam		331
+#define __NR_sched_setscheduler		332
+#define __NR_sched_getscheduler		333
+#define __NR_sched_yield		334
+#define __NR_sched_get_priority_max	335
+#define __NR_sched_get_priority_min	336
+#define __NR_sched_rr_get_interval	337
+#define __NR_afs_syscall		338
+#define __NR_uname			339
+#define __NR_nanosleep			340
+#define __NR_mremap			341
+#define __NR_nfsservctl			342
+#define __NR_setresuid			343
+#define __NR_getresuid			344
+#define __NR_pciconfig_read		345
+#define __NR_pciconfig_write		346
+#define __NR_query_module		347
+#define __NR_prctl			348
+#define __NR_pread64			349
+#define __NR_pwrite64			350
+#define __NR_rt_sigreturn		351
+#define __NR_rt_sigaction		352
+#define __NR_rt_sigprocmask		353
+#define __NR_rt_sigpending		354
+#define __NR_rt_sigtimedwait		355
+#define __NR_rt_sigqueueinfo		356
+#define __NR_rt_sigsuspend		357
+#define __NR_select			358
+#define __NR_gettimeofday		359
+#define __NR_settimeofday		360
+#define __NR_getitimer			361
+#define __NR_setitimer			362
+#define __NR_utimes			363
+#define __NR_getrusage			364
+#define __NR_wait4			365
+#define __NR_adjtimex			366
+#define __NR_getcwd			367
+#define __NR_capget			368
+#define __NR_capset			369
+#define __NR_sendfile			370
+#define __NR_setresgid			371
+#define __NR_getresgid			372
+#define __NR_dipc			373
+#define __NR_pivot_root			374
+#define __NR_mincore			375
+#define __NR_pciconfig_iobase		376
+#define __NR_getdents64			377
+#define __NR_gettid			378
+#define __NR_readahead			379
+/* 380 is unused */
+#define __NR_tkill			381
+#define __NR_setxattr			382
+#define __NR_lsetxattr			383
+#define __NR_fsetxattr			384
+#define __NR_getxattr			385
+#define __NR_lgetxattr			386
+#define __NR_fgetxattr			387
+#define __NR_listxattr			388
+#define __NR_llistxattr			389
+#define __NR_flistxattr			390
+#define __NR_removexattr		391
+#define __NR_lremovexattr		392
+#define __NR_fremovexattr		393
+#define __NR_futex			394
+#define __NR_sched_setaffinity		395     
+#define __NR_sched_getaffinity		396
+#define __NR_tuxcall			397
+#define __NR_io_setup			398
+#define __NR_io_destroy			399
+#define __NR_io_getevents		400
+#define __NR_io_submit			401
+#define __NR_io_cancel			402
+#define __NR_exit_group			405
+#define __NR_lookup_dcookie		406
+#define __NR_sys_epoll_create		407
+#define __NR_sys_epoll_ctl		408
+#define __NR_sys_epoll_wait		409
+#define __NR_remap_file_pages		410
+#define __NR_set_tid_address		411
+#define __NR_restart_syscall		412
+#define __NR_fadvise64			413
+#define __NR_timer_create		414
+#define __NR_timer_settime		415
+#define __NR_timer_gettime		416
+#define __NR_timer_getoverrun		417
+#define __NR_timer_delete		418
+#define __NR_clock_settime		419
+#define __NR_clock_gettime		420
+#define __NR_clock_getres		421
+#define __NR_clock_nanosleep		422
+#define __NR_semtimedop			423
+#define __NR_tgkill			424
+#define __NR_stat64			425
+#define __NR_lstat64			426
+#define __NR_fstat64			427
+#define __NR_vserver			428
+#define __NR_mbind			429
+#define __NR_get_mempolicy		430
+#define __NR_set_mempolicy		431
+#define __NR_mq_open			432
+#define __NR_mq_unlink			433
+#define __NR_mq_timedsend		434
+#define __NR_mq_timedreceive		435
+#define __NR_mq_notify			436
+#define __NR_mq_getsetattr		437
+#define __NR_waitid			438
+
+#define NR_SYSCALLS			439
+
+#if defined(__GNUC__)
+
+#define _syscall_return(type)						\
+	return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
+
+#define _syscall_clobbers						\
+	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",			\
+	"$22", "$23", "$24", "$25", "$27", "$28" 			\
+
+#define _syscall0(type, name)						\
+type name(void)								\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_19 __asm__("$19");			\
+									\
+		_sc_0 = __NR_##name;					\
+		__asm__("callsys # %0 %1 %2"				\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0)					\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#define _syscall1(type,name,type1,arg1)					\
+type name(type1 arg1)							\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_19 __asm__("$19");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		__asm__("callsys # %0 %1 %2 %3"				\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16)			\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2)			\
+type name(type1 arg1,type2 arg2)					\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_17 __asm__("$17");			\
+		register long _sc_19 __asm__("$19");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		_sc_17 = (long) (arg2);					\
+		__asm__("callsys # %0 %1 %2 %3 %4"			\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17)		\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
+type name(type1 arg1,type2 arg2,type3 arg3)				\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_17 __asm__("$17");			\
+		register long _sc_18 __asm__("$18");			\
+		register long _sc_19 __asm__("$19");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		_sc_17 = (long) (arg2);					\
+		_sc_18 = (long) (arg3);					\
+		__asm__("callsys # %0 %1 %2 %3 %4 %5"			\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
+			  "r"(_sc_18)					\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)		 \
+{									 \
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_17 __asm__("$17");			\
+		register long _sc_18 __asm__("$18");			\
+		register long _sc_19 __asm__("$19");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		_sc_17 = (long) (arg2);					\
+		_sc_18 = (long) (arg3);					\
+		_sc_19 = (long) (arg4);					\
+		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6"		\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
+			  "r"(_sc_18), "1"(_sc_19)			\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+} 
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+	  type5,arg5)							 \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_17 __asm__("$17");			\
+		register long _sc_18 __asm__("$18");			\
+		register long _sc_19 __asm__("$19");			\
+		register long _sc_20 __asm__("$20");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		_sc_17 = (long) (arg2);					\
+		_sc_18 = (long) (arg3);					\
+		_sc_19 = (long) (arg4);					\
+		_sc_20 = (long) (arg5);					\
+		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7"		\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
+			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20)		\
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+	  type5,arg5,type6,arg6)					 \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
+{									\
+	long _sc_ret, _sc_err;						\
+	{								\
+		register long _sc_0 __asm__("$0");			\
+		register long _sc_16 __asm__("$16");			\
+		register long _sc_17 __asm__("$17");			\
+		register long _sc_18 __asm__("$18");			\
+		register long _sc_19 __asm__("$19");			\
+		register long _sc_20 __asm__("$20");			\
+		register long _sc_21 __asm__("$21");			\
+									\
+		_sc_0 = __NR_##name;					\
+		_sc_16 = (long) (arg1);					\
+		_sc_17 = (long) (arg2);					\
+		_sc_18 = (long) (arg3);					\
+		_sc_19 = (long) (arg4);					\
+		_sc_20 = (long) (arg5);					\
+		_sc_21 = (long) (arg6);					\
+		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8"		\
+			: "=r"(_sc_0), "=r"(_sc_19)			\
+			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
+			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
+			: _syscall_clobbers);				\
+		_sc_ret = _sc_0, _sc_err = _sc_19;			\
+	}								\
+	_syscall_return(type);						\
+}
+
+#endif /* __LIBRARY__ && __GNUC__ */
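+
+/* Editorial example (not in the original header): with __LIBRARY__
+   defined, user-space code could instantiate one of the macros above,
+   e.g.
+
+	#define __LIBRARY__
+	#include <asm/unistd.h>
+
+	_syscall3(long, write, int, fd, const char *, buf, size_t, count)
+
+   which expands to a `long write(int fd, const char *buf, size_t count)'
+   that loads __NR_write into $0 and the arguments into $16-$18,
+   executes the PALcode "callsys", and applies _syscall_return() to
+   turn a $19 error indication into an errno-style return.  */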
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#endif
+
+#ifdef __KERNEL_SYSCALLS__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <asm/ptrace.h>
+
+static inline long open(const char * name, int flags, int mode)
+{
+	return sys_open(name, flags, mode);
+}
+
+static inline long dup(int fd)
+{
+	return sys_dup(fd);
+}
+
+static inline long close(int fd)
+{
+	return sys_close(fd);
+}
+
+static inline off_t lseek(int fd, off_t off, int whence)
+{
+	return sys_lseek(fd, off, whence);
+}
+
+static inline void _exit(int value)
+{
+	sys_exit(value);
+}
+
+#define exit(x) _exit(x)
+
+static inline long write(int fd, const char * buf, size_t nr)
+{
+	return sys_write(fd, buf, nr);
+}
+
+static inline long read(int fd, char * buf, size_t nr)
+{
+	return sys_read(fd, buf, nr);
+}
+
+extern int execve(char *, char **, char **);
+
+static inline long setsid(void)
+{
+	return sys_setsid();
+}
+
+static inline pid_t waitpid(int pid, int * wait_stat, int flags)
+{
+	return sys_wait4(pid, wait_stat, flags, NULL);
+}
+
+asmlinkage int sys_execve(char *ufilename, char **argv, char **envp,
+			unsigned long a3, unsigned long a4, unsigned long a5,
+			struct pt_regs regs);
+asmlinkage long sys_rt_sigaction(int sig,
+				const struct sigaction __user *act,
+				struct sigaction __user *oact,
+				size_t sigsetsize,
+				void *restorer);
+
+#endif /* __KERNEL_SYSCALLS__ */
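+
+/* Usage sketch (illustrative, not part of the original file): code
+   compiled with __KERNEL_SYSCALLS__ may call these wrappers directly
+   during early boot, e.g.
+
+	if (open("/dev/console", O_RDWR, 0) < 0)
+		printk("Warning: unable to open an initial console.\n");
+
+   much as init/main.c does before handing control to init.  */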
+
+/* "Conditional" syscalls.  What we want is
+
+	__attribute__((weak,alias("sys_ni_syscall")))
+
+   but that raises the problem of what type to give the symbol.  If we use
+   a prototype, it'll conflict with the definition given in this file and
+   others.  If we use __typeof, we discover that not all symbols actually
+   have declarations.  If we use no prototype, then we get warnings from
+   -Wstrict-prototypes.  Ho hum.  */
+
+#define cond_syscall(x)  asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
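+
+/* Example (illustrative): an architecture that lacks, say,
+   sys_pciconfig_read can declare
+
+	cond_syscall(sys_pciconfig_read);
+
+   so the name becomes a weak alias for sys_ni_syscall and the syscall
+   table still links, returning -ENOSYS at run time.  */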
+
+#endif /* _ALPHA_UNISTD_H */
diff --git a/include/asm-alpha/user.h b/include/asm-alpha/user.h
new file mode 100644
index 0000000..7e417fc
--- /dev/null
+++ b/include/asm-alpha/user.h
@@ -0,0 +1,53 @@
+#ifndef _ALPHA_USER_H
+#define _ALPHA_USER_H
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+
+#include <asm/page.h>
+#include <asm/reg.h>
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * Linux we use the `trad-core' bfd, NOT the osf-core).  The file contents
+ * are as follows:
+ *
+ *  upage: 1 page consisting of a user struct that tells gdb
+ *	what is present in the file.  Directly after this is a
+ *	copy of the task_struct, which is currently not used by gdb,
+ *	but it may come in handy at some point.  All of the registers
+ *	are stored as part of the upage.  The upage should always be
+ *	only one page long.
+ *  data: The data segment follows next.  We use current->end_text to
+ *	current->brk to pick up all of the user variables, plus any memory
+ *	that may have been sbrk'ed.  No attempt is made to determine if a
+ *	page is demand-zero or if a page is totally unused; we just cover
+ *	the entire range.  All of the addresses are rounded in such a way
+ *	that an integral number of pages is written.
+ *  stack: We need the stack information in order to get a meaningful
+ *	backtrace.  We need to write the data from usp to
+ *	current->start_stack, so we round each of these in order to be able
+ *	to write an integer number of pages.
+ */
+struct user {
+	unsigned long	regs[EF_SIZE/8+32];	/* integer and fp regs */
+	size_t		u_tsize;		/* text size (pages) */
+	size_t		u_dsize;		/* data size (pages) */
+	size_t		u_ssize;		/* stack size (pages) */
+	unsigned long	start_code;		/* text starting address */
+	unsigned long	start_data;		/* data starting address */
+	unsigned long	start_stack;		/* stack starting address */
+	long int	signal;			/* signal causing core dump */
+	struct regs *	u_ar0;			/* help gdb find registers */
+	unsigned long	magic;			/* identifies a core file */
+	char		u_comm[32];		/* user command name */
+};
+
+#define NBPG			PAGE_SIZE
+#define UPAGES			1
+#define HOST_TEXT_START_ADDR	(u.start_code)
+#define HOST_DATA_START_ADDR	(u.start_data)
+#define HOST_STACK_END_ADDR	(u.start_stack + u.u_ssize * NBPG)
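+
+/* Layout sketch (editorial, not in the original): gdb's trad-core
+   backend expects the dump to look like
+
+	[ upage : UPAGES * NBPG bytes, beginning with struct user ]
+	[ data  : u_dsize pages starting at start_data            ]
+	[ stack : u_ssize pages ending at HOST_STACK_END_ADDR     ]
+
+   with the register block recovered through u_ar0 and the regs[]
+   array above.  */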
+
+#endif /* _ALPHA_USER_H */
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
new file mode 100644
index 0000000..8ca4f6b
--- /dev/null
+++ b/include/asm-alpha/vga.h
@@ -0,0 +1,51 @@
+/*
+ *	Access to VGA videoram
+ *
+ *	(c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include <asm/io.h>
+
+#define VT_BUF_HAVE_RW
+#define VT_BUF_HAVE_MEMSETW
+#define VT_BUF_HAVE_MEMCPYW
+
+extern inline void scr_writew(u16 val, volatile u16 *addr)
+{
+	if (__is_ioaddr(addr))
+		__raw_writew(val, (volatile u16 __iomem *) addr);
+	else
+		*addr = val;
+}
+
+extern inline u16 scr_readw(volatile const u16 *addr)
+{
+	if (__is_ioaddr(addr))
+		return __raw_readw((volatile const u16 __iomem *) addr);
+	else
+		return *addr;
+}
+
+extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
+{
+	if (__is_ioaddr(s))
+		memsetw_io((u16 __iomem *) s, c, count);
+	else
+		memsetw(s, c, count);
+}
+
+/* Do not trust that the usage will be correct; analyze the arguments.  */
+extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+
+/* ??? These are currently only used for downloading character sets.  As
+   such, they don't need memory barriers.  Is this all they are intended
+   to be used for?  */
+#define vga_readb(a)	readb((u8 __iomem *)(a))
+#define vga_writeb(v,a)	writeb(v, (u8 __iomem *)(a))
+
+#define VGA_MAP_MEM(x)	((unsigned long) ioremap(x, 0))
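+
+/* Illustration (hypothetical, not in the original): the console layer
+   may hand these helpers either an ioremapped VGA address or a plain
+   kernel buffer; __is_ioaddr() selects the access path, e.g.
+
+	u16 shadow[80];
+	scr_memsetw(shadow, 0x0720, sizeof(shadow));              (RAM)
+	scr_memsetw((u16 *) VGA_MAP_MEM(0xb8000), 0x0720, 160);   (MMIO)
+ */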
+
+#endif
diff --git a/include/asm-alpha/xor.h b/include/asm-alpha/xor.h
new file mode 100644
index 0000000..5ee1c2b
--- /dev/null
+++ b/include/asm-alpha/xor.h
@@ -0,0 +1,855 @@
+/*
+ * include/asm-alpha/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for alpha EV5 and EV6
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *,
+		        unsigned long *);
+extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *,
+		        unsigned long *, unsigned long *);
+extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *,
+		        unsigned long *, unsigned long *, unsigned long *);
+
+extern void xor_alpha_prefetch_2(unsigned long, unsigned long *,
+				 unsigned long *);
+extern void xor_alpha_prefetch_3(unsigned long, unsigned long *,
+				 unsigned long *, unsigned long *);
+extern void xor_alpha_prefetch_4(unsigned long, unsigned long *,
+				 unsigned long *, unsigned long *,
+				 unsigned long *);
+extern void xor_alpha_prefetch_5(unsigned long, unsigned long *,
+				 unsigned long *, unsigned long *,
+				 unsigned long *, unsigned long *);
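+
+/* Reference semantics (editorial sketch, not in the original): each
+   xor_alpha_N routine XORs N-1 source buffers into the first one,
+   `bytes' at a time in 64-byte lines.  xor_alpha_2 is equivalent to
+
+	void xor_ref_2(unsigned long bytes, unsigned long *p1,
+		       unsigned long *p2)
+	{
+		unsigned long lines = bytes >> 6;
+		do {
+			int i;
+			for (i = 0; i < 8; i++)
+				p1[i] ^= p2[i];
+			p1 += 8;
+			p2 += 8;
+		} while (--lines);
+	}
+
+   The assembly below unrolls this by hand and schedules loads early to
+   hide EV5/EV6 load-use latency; the prefetch variants additionally
+   touch data 256 bytes ahead with ldq into $31, the zero register.  */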
+
+asm("								\n\
+	.text							\n\
+	.align 3						\n\
+	.ent xor_alpha_2					\n\
+xor_alpha_2:							\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+	.align 4						\n\
+2:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,8($17)						\n\
+	ldq $3,8($18)						\n\
+								\n\
+	ldq $4,16($17)						\n\
+	ldq $5,16($18)						\n\
+	ldq $6,24($17)						\n\
+	ldq $7,24($18)						\n\
+								\n\
+	ldq $19,32($17)						\n\
+	ldq $20,32($18)						\n\
+	ldq $21,40($17)						\n\
+	ldq $22,40($18)						\n\
+								\n\
+	ldq $23,48($17)						\n\
+	ldq $24,48($18)						\n\
+	ldq $25,56($17)						\n\
+	xor $0,$1,$0		# 7 cycles from $1 load		\n\
+								\n\
+	ldq $27,56($18)						\n\
+	xor $2,$3,$2						\n\
+	stq $0,0($17)						\n\
+	xor $4,$5,$4						\n\
+								\n\
+	stq $2,8($17)						\n\
+	xor $6,$7,$6						\n\
+	stq $4,16($17)						\n\
+	xor $19,$20,$19						\n\
+								\n\
+	stq $6,24($17)						\n\
+	xor $21,$22,$21						\n\
+	stq $19,32($17)						\n\
+	xor $23,$24,$23						\n\
+								\n\
+	stq $21,40($17)						\n\
+	xor $25,$27,$25						\n\
+	stq $23,48($17)						\n\
+	subq $16,1,$16						\n\
+								\n\
+	stq $25,56($17)						\n\
+	addq $17,64,$17						\n\
+	addq $18,64,$18						\n\
+	bgt $16,2b						\n\
+								\n\
+	ret							\n\
+	.end xor_alpha_2					\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_3					\n\
+xor_alpha_3:							\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+	.align 4						\n\
+3:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,8($17)						\n\
+								\n\
+	ldq $4,8($18)						\n\
+	ldq $6,16($17)						\n\
+	ldq $7,16($18)						\n\
+	ldq $21,24($17)						\n\
+								\n\
+	ldq $22,24($18)						\n\
+	ldq $24,32($17)						\n\
+	ldq $25,32($18)						\n\
+	ldq $5,8($19)						\n\
+								\n\
+	ldq $20,16($19)						\n\
+	ldq $23,24($19)						\n\
+	ldq $27,32($19)						\n\
+	nop							\n\
+								\n\
+	xor $0,$1,$1		# 8 cycles from $0 load		\n\
+	xor $3,$4,$4		# 6 cycles from $4 load		\n\
+	xor $6,$7,$7		# 6 cycles from $7 load		\n\
+	xor $21,$22,$22		# 5 cycles from $22 load	\n\
+								\n\
+	xor $1,$2,$2		# 9 cycles from $2 load		\n\
+	xor $24,$25,$25		# 5 cycles from $25 load	\n\
+	stq $2,0($17)						\n\
+	xor $4,$5,$5		# 6 cycles from $5 load		\n\
+								\n\
+	stq $5,8($17)						\n\
+	xor $7,$20,$20		# 7 cycles from $20 load	\n\
+	stq $20,16($17)						\n\
+	xor $22,$23,$23		# 7 cycles from $23 load	\n\
+								\n\
+	stq $23,24($17)						\n\
+	xor $25,$27,$27		# 7 cycles from $27 load	\n\
+	stq $27,32($17)						\n\
+	nop							\n\
+								\n\
+	ldq $0,40($17)						\n\
+	ldq $1,40($18)						\n\
+	ldq $3,48($17)						\n\
+	ldq $4,48($18)						\n\
+								\n\
+	ldq $6,56($17)						\n\
+	ldq $7,56($18)						\n\
+	ldq $2,40($19)						\n\
+	ldq $5,48($19)						\n\
+								\n\
+	ldq $20,56($19)						\n\
+	xor $0,$1,$1		# 4 cycles from $1 load		\n\
+	xor $3,$4,$4		# 5 cycles from $4 load		\n\
+	xor $6,$7,$7		# 5 cycles from $7 load		\n\
+								\n\
+	xor $1,$2,$2		# 4 cycles from $2 load		\n\
+	xor $4,$5,$5		# 5 cycles from $5 load		\n\
+	stq $2,40($17)						\n\
+	xor $7,$20,$20		# 4 cycles from $20 load	\n\
+								\n\
+	stq $5,48($17)						\n\
+	subq $16,1,$16						\n\
+	stq $20,56($17)						\n\
+	addq $19,64,$19						\n\
+								\n\
+	addq $18,64,$18						\n\
+	addq $17,64,$17						\n\
+	bgt $16,3b						\n\
+	ret							\n\
+	.end xor_alpha_3					\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_4					\n\
+xor_alpha_4:							\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+	.align 4						\n\
+4:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,0($20)						\n\
+								\n\
+	ldq $4,8($17)						\n\
+	ldq $5,8($18)						\n\
+	ldq $6,8($19)						\n\
+	ldq $7,8($20)						\n\
+								\n\
+	ldq $21,16($17)						\n\
+	ldq $22,16($18)						\n\
+	ldq $23,16($19)						\n\
+	ldq $24,16($20)						\n\
+								\n\
+	ldq $25,24($17)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+	ldq $27,24($18)						\n\
+	xor $2,$3,$3		# 6 cycles from $3 load		\n\
+								\n\
+	ldq $0,24($19)						\n\
+	xor $1,$3,$3						\n\
+	ldq $1,24($20)						\n\
+	xor $4,$5,$5		# 7 cycles from $5 load		\n\
+								\n\
+	stq $3,0($17)						\n\
+	xor $6,$7,$7						\n\
+	xor $21,$22,$22		# 7 cycles from $22 load	\n\
+	xor $5,$7,$7						\n\
+								\n\
+	stq $7,8($17)						\n\
+	xor $23,$24,$24		# 7 cycles from $24 load	\n\
+	ldq $2,32($17)						\n\
+	xor $22,$24,$24						\n\
+								\n\
+	ldq $3,32($18)						\n\
+	ldq $4,32($19)						\n\
+	ldq $5,32($20)						\n\
+	xor $25,$27,$27		# 8 cycles from $27 load	\n\
+								\n\
+	ldq $6,40($17)						\n\
+	ldq $7,40($18)						\n\
+	ldq $21,40($19)						\n\
+	ldq $22,40($20)						\n\
+								\n\
+	stq $24,16($17)						\n\
+	xor $0,$1,$1		# 9 cycles from $1 load		\n\
+	xor $2,$3,$3		# 5 cycles from $3 load		\n\
+	xor $27,$1,$1						\n\
+								\n\
+	stq $1,24($17)						\n\
+	xor $4,$5,$5		# 5 cycles from $5 load		\n\
+	ldq $23,48($17)						\n\
+	ldq $24,48($18)						\n\
+								\n\
+	ldq $25,48($19)						\n\
+	xor $3,$5,$5						\n\
+	ldq $27,48($20)						\n\
+	ldq $0,56($17)						\n\
+								\n\
+	ldq $1,56($18)						\n\
+	ldq $2,56($19)						\n\
+	xor $6,$7,$7		# 8 cycles from $6 load		\n\
+	ldq $3,56($20)						\n\
+								\n\
+	stq $5,32($17)						\n\
+	xor $21,$22,$22		# 8 cycles from $22 load	\n\
+	xor $7,$22,$22						\n\
+	xor $23,$24,$24		# 5 cycles from $24 load	\n\
+								\n\
+	stq $22,40($17)						\n\
+	xor $25,$27,$27		# 5 cycles from $27 load	\n\
+	xor $24,$27,$27						\n\
+	xor $0,$1,$1		# 5 cycles from $1 load		\n\
+								\n\
+	stq $27,48($17)						\n\
+	xor $2,$3,$3		# 4 cycles from $3 load		\n\
+	xor $1,$3,$3						\n\
+	subq $16,1,$16						\n\
+								\n\
+	stq $3,56($17)						\n\
+	addq $20,64,$20						\n\
+	addq $19,64,$19						\n\
+	addq $18,64,$18						\n\
+								\n\
+	addq $17,64,$17						\n\
+	bgt $16,4b						\n\
+	ret							\n\
+	.end xor_alpha_4					\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_5					\n\
+xor_alpha_5:							\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+	.align 4						\n\
+5:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,0($20)						\n\
+								\n\
+	ldq $4,0($21)						\n\
+	ldq $5,8($17)						\n\
+	ldq $6,8($18)						\n\
+	ldq $7,8($19)						\n\
+								\n\
+	ldq $22,8($20)						\n\
+	ldq $23,8($21)						\n\
+	ldq $24,16($17)						\n\
+	ldq $25,16($18)						\n\
+								\n\
+	ldq $27,16($19)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+	ldq $28,16($20)						\n\
+	xor $2,$3,$3		# 6 cycles from $3 load		\n\
+								\n\
+	ldq $0,16($21)						\n\
+	xor $1,$3,$3						\n\
+	ldq $1,24($17)						\n\
+	xor $3,$4,$4		# 7 cycles from $4 load		\n\
+								\n\
+	stq $4,0($17)						\n\
+	xor $5,$6,$6		# 7 cycles from $6 load		\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	xor $6,$23,$23		# 7 cycles from $23 load	\n\
+								\n\
+	ldq $2,24($18)						\n\
+	xor $22,$23,$23						\n\
+	ldq $3,24($19)						\n\
+	xor $24,$25,$25		# 8 cycles from $25 load	\n\
+								\n\
+	stq $23,8($17)						\n\
+	xor $25,$27,$27		# 8 cycles from $27 load	\n\
+	ldq $4,24($20)						\n\
+	xor $28,$0,$0		# 7 cycles from $0 load		\n\
+								\n\
+	ldq $5,24($21)						\n\
+	xor $27,$0,$0						\n\
+	ldq $6,32($17)						\n\
+	ldq $7,32($18)						\n\
+								\n\
+	stq $0,16($17)						\n\
+	xor $1,$2,$2		# 6 cycles from $2 load		\n\
+	ldq $22,32($19)						\n\
+	xor $3,$4,$4		# 4 cycles from $4 load		\n\
+								\n\
+	ldq $23,32($20)						\n\
+	xor $2,$4,$4						\n\
+	ldq $24,32($21)						\n\
+	ldq $25,40($17)						\n\
+								\n\
+	ldq $27,40($18)						\n\
+	ldq $28,40($19)						\n\
+	ldq $0,40($20)						\n\
+	xor $4,$5,$5		# 7 cycles from $5 load		\n\
+								\n\
+	stq $5,24($17)						\n\
+	xor $6,$7,$7		# 7 cycles from $7 load		\n\
+	ldq $1,40($21)						\n\
+	ldq $2,48($17)						\n\
+								\n\
+	ldq $3,48($18)						\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	ldq $4,48($19)						\n\
+	xor $23,$24,$24		# 6 cycles from $24 load	\n\
+								\n\
+	ldq $5,48($20)						\n\
+	xor $22,$24,$24						\n\
+	ldq $6,48($21)						\n\
+	xor $25,$27,$27		# 7 cycles from $27 load	\n\
+								\n\
+	stq $24,32($17)						\n\
+	xor $27,$28,$28		# 8 cycles from $28 load	\n\
+	ldq $7,56($17)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+								\n\
+	ldq $22,56($18)						\n\
+	ldq $23,56($19)						\n\
+	ldq $24,56($20)						\n\
+	ldq $25,56($21)						\n\
+								\n\
+	xor $28,$1,$1						\n\
+	xor $2,$3,$3		# 9 cycles from $3 load		\n\
+	xor $3,$4,$4		# 9 cycles from $4 load		\n\
+	xor $5,$6,$6		# 8 cycles from $6 load		\n\
+								\n\
+	stq $1,40($17)						\n\
+	xor $4,$6,$6						\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	xor $23,$24,$24		# 6 cycles from $24 load	\n\
+								\n\
+	stq $6,48($17)						\n\
+	xor $22,$24,$24						\n\
+	subq $16,1,$16						\n\
+	xor $24,$25,$25		# 8 cycles from $25 load	\n\
+								\n\
+	stq $25,56($17)						\n\
+	addq $21,64,$21						\n\
+	addq $20,64,$20						\n\
+	addq $19,64,$19						\n\
+								\n\
+	addq $18,64,$18						\n\
+	addq $17,64,$17						\n\
+	bgt $16,5b						\n\
+	ret							\n\
+	.end xor_alpha_5					\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_prefetch_2				\n\
+xor_alpha_prefetch_2:						\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+								\n\
+	ldq $31, 0($17)						\n\
+	ldq $31, 0($18)						\n\
+								\n\
+	ldq $31, 64($17)					\n\
+	ldq $31, 64($18)					\n\
+								\n\
+	ldq $31, 128($17)					\n\
+	ldq $31, 128($18)					\n\
+								\n\
+	ldq $31, 192($17)					\n\
+	ldq $31, 192($18)					\n\
+	.align 4						\n\
+2:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,8($17)						\n\
+	ldq $3,8($18)						\n\
+								\n\
+	ldq $4,16($17)						\n\
+	ldq $5,16($18)						\n\
+	ldq $6,24($17)						\n\
+	ldq $7,24($18)						\n\
+								\n\
+	ldq $19,32($17)						\n\
+	ldq $20,32($18)						\n\
+	ldq $21,40($17)						\n\
+	ldq $22,40($18)						\n\
+								\n\
+	ldq $23,48($17)						\n\
+	ldq $24,48($18)						\n\
+	ldq $25,56($17)						\n\
+	ldq $27,56($18)						\n\
+								\n\
+	ldq $31,256($17)					\n\
+	xor $0,$1,$0		# 8 cycles from $1 load		\n\
+	ldq $31,256($18)					\n\
+	xor $2,$3,$2						\n\
+								\n\
+	stq $0,0($17)						\n\
+	xor $4,$5,$4						\n\
+	stq $2,8($17)						\n\
+	xor $6,$7,$6						\n\
+								\n\
+	stq $4,16($17)						\n\
+	xor $19,$20,$19						\n\
+	stq $6,24($17)						\n\
+	xor $21,$22,$21						\n\
+								\n\
+	stq $19,32($17)						\n\
+	xor $23,$24,$23						\n\
+	stq $21,40($17)						\n\
+	xor $25,$27,$25						\n\
+								\n\
+	stq $23,48($17)						\n\
+	subq $16,1,$16						\n\
+	stq $25,56($17)						\n\
+	addq $17,64,$17						\n\
+								\n\
+	addq $18,64,$18						\n\
+	bgt $16,2b						\n\
+	ret							\n\
+	.end xor_alpha_prefetch_2				\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_prefetch_3				\n\
+xor_alpha_prefetch_3:						\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+								\n\
+	ldq $31, 0($17)						\n\
+	ldq $31, 0($18)						\n\
+	ldq $31, 0($19)						\n\
+								\n\
+	ldq $31, 64($17)					\n\
+	ldq $31, 64($18)					\n\
+	ldq $31, 64($19)					\n\
+								\n\
+	ldq $31, 128($17)					\n\
+	ldq $31, 128($18)					\n\
+	ldq $31, 128($19)					\n\
+								\n\
+	ldq $31, 192($17)					\n\
+	ldq $31, 192($18)					\n\
+	ldq $31, 192($19)					\n\
+	.align 4						\n\
+3:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,8($17)						\n\
+								\n\
+	ldq $4,8($18)						\n\
+	ldq $6,16($17)						\n\
+	ldq $7,16($18)						\n\
+	ldq $21,24($17)						\n\
+								\n\
+	ldq $22,24($18)						\n\
+	ldq $24,32($17)						\n\
+	ldq $25,32($18)						\n\
+	ldq $5,8($19)						\n\
+								\n\
+	ldq $20,16($19)						\n\
+	ldq $23,24($19)						\n\
+	ldq $27,32($19)						\n\
+	nop							\n\
+								\n\
+	xor $0,$1,$1		# 8 cycles from $0 load		\n\
+	xor $3,$4,$4		# 7 cycles from $4 load		\n\
+	xor $6,$7,$7		# 6 cycles from $7 load		\n\
+	xor $21,$22,$22		# 5 cycles from $22 load	\n\
+								\n\
+	xor $1,$2,$2		# 9 cycles from $2 load		\n\
+	xor $24,$25,$25		# 5 cycles from $25 load	\n\
+	stq $2,0($17)						\n\
+	xor $4,$5,$5		# 6 cycles from $5 load		\n\
+								\n\
+	stq $5,8($17)						\n\
+	xor $7,$20,$20		# 7 cycles from $20 load	\n\
+	stq $20,16($17)						\n\
+	xor $22,$23,$23		# 7 cycles from $23 load	\n\
+								\n\
+	stq $23,24($17)						\n\
+	xor $25,$27,$27		# 7 cycles from $27 load	\n\
+	stq $27,32($17)						\n\
+	nop							\n\
+								\n\
+	ldq $0,40($17)						\n\
+	ldq $1,40($18)						\n\
+	ldq $3,48($17)						\n\
+	ldq $4,48($18)						\n\
+								\n\
+	ldq $6,56($17)						\n\
+	ldq $7,56($18)						\n\
+	ldq $2,40($19)						\n\
+	ldq $5,48($19)						\n\
+								\n\
+	ldq $20,56($19)						\n\
+	ldq $31,256($17)					\n\
+	ldq $31,256($18)					\n\
+	ldq $31,256($19)					\n\
+								\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+	xor $3,$4,$4		# 5 cycles from $4 load		\n\
+	xor $6,$7,$7		# 5 cycles from $7 load		\n\
+	xor $1,$2,$2		# 4 cycles from $2 load		\n\
+								\n\
+	xor $4,$5,$5		# 5 cycles from $5 load		\n\
+	xor $7,$20,$20		# 4 cycles from $20 load	\n\
+	stq $2,40($17)						\n\
+	subq $16,1,$16						\n\
+								\n\
+	stq $5,48($17)						\n\
+	addq $19,64,$19						\n\
+	stq $20,56($17)						\n\
+	addq $18,64,$18						\n\
+								\n\
+	addq $17,64,$17						\n\
+	bgt $16,3b						\n\
+	ret							\n\
+	.end xor_alpha_prefetch_3				\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_prefetch_4				\n\
+xor_alpha_prefetch_4:						\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+								\n\
+	ldq $31, 0($17)						\n\
+	ldq $31, 0($18)						\n\
+	ldq $31, 0($19)						\n\
+	ldq $31, 0($20)						\n\
+								\n\
+	ldq $31, 64($17)					\n\
+	ldq $31, 64($18)					\n\
+	ldq $31, 64($19)					\n\
+	ldq $31, 64($20)					\n\
+								\n\
+	ldq $31, 128($17)					\n\
+	ldq $31, 128($18)					\n\
+	ldq $31, 128($19)					\n\
+	ldq $31, 128($20)					\n\
+								\n\
+	ldq $31, 192($17)					\n\
+	ldq $31, 192($18)					\n\
+	ldq $31, 192($19)					\n\
+	ldq $31, 192($20)					\n\
+	.align 4						\n\
+4:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,0($20)						\n\
+								\n\
+	ldq $4,8($17)						\n\
+	ldq $5,8($18)						\n\
+	ldq $6,8($19)						\n\
+	ldq $7,8($20)						\n\
+								\n\
+	ldq $21,16($17)						\n\
+	ldq $22,16($18)						\n\
+	ldq $23,16($19)						\n\
+	ldq $24,16($20)						\n\
+								\n\
+	ldq $25,24($17)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+	ldq $27,24($18)						\n\
+	xor $2,$3,$3		# 6 cycles from $3 load		\n\
+								\n\
+	ldq $0,24($19)						\n\
+	xor $1,$3,$3						\n\
+	ldq $1,24($20)						\n\
+	xor $4,$5,$5		# 7 cycles from $5 load		\n\
+								\n\
+	stq $3,0($17)						\n\
+	xor $6,$7,$7						\n\
+	xor $21,$22,$22		# 7 cycles from $22 load	\n\
+	xor $5,$7,$7						\n\
+								\n\
+	stq $7,8($17)						\n\
+	xor $23,$24,$24		# 7 cycles from $24 load	\n\
+	ldq $2,32($17)						\n\
+	xor $22,$24,$24						\n\
+								\n\
+	ldq $3,32($18)						\n\
+	ldq $4,32($19)						\n\
+	ldq $5,32($20)						\n\
+	xor $25,$27,$27		# 8 cycles from $27 load	\n\
+								\n\
+	ldq $6,40($17)						\n\
+	ldq $7,40($18)						\n\
+	ldq $21,40($19)						\n\
+	ldq $22,40($20)						\n\
+								\n\
+	stq $24,16($17)						\n\
+	xor $0,$1,$1		# 9 cycles from $1 load		\n\
+	xor $2,$3,$3		# 5 cycles from $3 load		\n\
+	xor $27,$1,$1						\n\
+								\n\
+	stq $1,24($17)						\n\
+	xor $4,$5,$5		# 5 cycles from $5 load		\n\
+	ldq $23,48($17)						\n\
+	xor $3,$5,$5						\n\
+								\n\
+	ldq $24,48($18)						\n\
+	ldq $25,48($19)						\n\
+	ldq $27,48($20)						\n\
+	ldq $0,56($17)						\n\
+								\n\
+	ldq $1,56($18)						\n\
+	ldq $2,56($19)						\n\
+	ldq $3,56($20)						\n\
+	xor $6,$7,$7		# 8 cycles from $6 load		\n\
+								\n\
+	ldq $31,256($17)					\n\
+	xor $21,$22,$22		# 8 cycles from $22 load	\n\
+	ldq $31,256($18)					\n\
+	xor $7,$22,$22						\n\
+								\n\
+	ldq $31,256($19)					\n\
+	xor $23,$24,$24		# 6 cycles from $24 load	\n\
+	ldq $31,256($20)					\n\
+	xor $25,$27,$27		# 6 cycles from $27 load	\n\
+								\n\
+	stq $5,32($17)						\n\
+	xor $24,$27,$27						\n\
+	xor $0,$1,$1		# 7 cycles from $1 load		\n\
+	xor $2,$3,$3		# 6 cycles from $3 load		\n\
+								\n\
+	stq $22,40($17)						\n\
+	xor $1,$3,$3						\n\
+	stq $27,48($17)						\n\
+	subq $16,1,$16						\n\
+								\n\
+	stq $3,56($17)						\n\
+	addq $20,64,$20						\n\
+	addq $19,64,$19						\n\
+	addq $18,64,$18						\n\
+								\n\
+	addq $17,64,$17						\n\
+	bgt $16,4b						\n\
+	ret							\n\
+	.end xor_alpha_prefetch_4				\n\
+								\n\
+	.align 3						\n\
+	.ent xor_alpha_prefetch_5				\n\
+xor_alpha_prefetch_5:						\n\
+	.prologue 0						\n\
+	srl $16, 6, $16						\n\
+								\n\
+	ldq $31, 0($17)						\n\
+	ldq $31, 0($18)						\n\
+	ldq $31, 0($19)						\n\
+	ldq $31, 0($20)						\n\
+	ldq $31, 0($21)						\n\
+								\n\
+	ldq $31, 64($17)					\n\
+	ldq $31, 64($18)					\n\
+	ldq $31, 64($19)					\n\
+	ldq $31, 64($20)					\n\
+	ldq $31, 64($21)					\n\
+								\n\
+	ldq $31, 128($17)					\n\
+	ldq $31, 128($18)					\n\
+	ldq $31, 128($19)					\n\
+	ldq $31, 128($20)					\n\
+	ldq $31, 128($21)					\n\
+								\n\
+	ldq $31, 192($17)					\n\
+	ldq $31, 192($18)					\n\
+	ldq $31, 192($19)					\n\
+	ldq $31, 192($20)					\n\
+	ldq $31, 192($21)					\n\
+	.align 4						\n\
+5:								\n\
+	ldq $0,0($17)						\n\
+	ldq $1,0($18)						\n\
+	ldq $2,0($19)						\n\
+	ldq $3,0($20)						\n\
+								\n\
+	ldq $4,0($21)						\n\
+	ldq $5,8($17)						\n\
+	ldq $6,8($18)						\n\
+	ldq $7,8($19)						\n\
+								\n\
+	ldq $22,8($20)						\n\
+	ldq $23,8($21)						\n\
+	ldq $24,16($17)						\n\
+	ldq $25,16($18)						\n\
+								\n\
+	ldq $27,16($19)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+	ldq $28,16($20)						\n\
+	xor $2,$3,$3		# 6 cycles from $3 load		\n\
+								\n\
+	ldq $0,16($21)						\n\
+	xor $1,$3,$3						\n\
+	ldq $1,24($17)						\n\
+	xor $3,$4,$4		# 7 cycles from $4 load		\n\
+								\n\
+	stq $4,0($17)						\n\
+	xor $5,$6,$6		# 7 cycles from $6 load		\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	xor $6,$23,$23		# 7 cycles from $23 load	\n\
+								\n\
+	ldq $2,24($18)						\n\
+	xor $22,$23,$23						\n\
+	ldq $3,24($19)						\n\
+	xor $24,$25,$25		# 8 cycles from $25 load	\n\
+								\n\
+	stq $23,8($17)						\n\
+	xor $25,$27,$27		# 8 cycles from $27 load	\n\
+	ldq $4,24($20)						\n\
+	xor $28,$0,$0		# 7 cycles from $0 load		\n\
+								\n\
+	ldq $5,24($21)						\n\
+	xor $27,$0,$0						\n\
+	ldq $6,32($17)						\n\
+	ldq $7,32($18)						\n\
+								\n\
+	stq $0,16($17)						\n\
+	xor $1,$2,$2		# 6 cycles from $2 load		\n\
+	ldq $22,32($19)						\n\
+	xor $3,$4,$4		# 4 cycles from $4 load		\n\
+								\n\
+	ldq $23,32($20)						\n\
+	xor $2,$4,$4						\n\
+	ldq $24,32($21)						\n\
+	ldq $25,40($17)						\n\
+								\n\
+	ldq $27,40($18)						\n\
+	ldq $28,40($19)						\n\
+	ldq $0,40($20)						\n\
+	xor $4,$5,$5		# 7 cycles from $5 load		\n\
+								\n\
+	stq $5,24($17)						\n\
+	xor $6,$7,$7		# 7 cycles from $7 load		\n\
+	ldq $1,40($21)						\n\
+	ldq $2,48($17)						\n\
+								\n\
+	ldq $3,48($18)						\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	ldq $4,48($19)						\n\
+	xor $23,$24,$24		# 6 cycles from $24 load	\n\
+								\n\
+	ldq $5,48($20)						\n\
+	xor $22,$24,$24						\n\
+	ldq $6,48($21)						\n\
+	xor $25,$27,$27		# 7 cycles from $27 load	\n\
+								\n\
+	stq $24,32($17)						\n\
+	xor $27,$28,$28		# 8 cycles from $28 load	\n\
+	ldq $7,56($17)						\n\
+	xor $0,$1,$1		# 6 cycles from $1 load		\n\
+								\n\
+	ldq $22,56($18)						\n\
+	ldq $23,56($19)						\n\
+	ldq $24,56($20)						\n\
+	ldq $25,56($21)						\n\
+								\n\
+	ldq $31,256($17)					\n\
+	xor $28,$1,$1						\n\
+	ldq $31,256($18)					\n\
+	xor $2,$3,$3		# 9 cycles from $3 load		\n\
+								\n\
+	ldq $31,256($19)					\n\
+	xor $3,$4,$4		# 9 cycles from $4 load		\n\
+	ldq $31,256($20)					\n\
+	xor $5,$6,$6		# 8 cycles from $6 load		\n\
+								\n\
+	stq $1,40($17)						\n\
+	xor $4,$6,$6						\n\
+	xor $7,$22,$22		# 7 cycles from $22 load	\n\
+	xor $23,$24,$24		# 6 cycles from $24 load	\n\
+								\n\
+	stq $6,48($17)						\n\
+	xor $22,$24,$24						\n\
+	ldq $31,256($21)					\n\
+	xor $24,$25,$25		# 8 cycles from $25 load	\n\
+								\n\
+	stq $25,56($17)						\n\
+	subq $16,1,$16						\n\
+	addq $21,64,$21						\n\
+	addq $20,64,$20						\n\
+								\n\
+	addq $19,64,$19						\n\
+	addq $18,64,$18						\n\
+	addq $17,64,$17						\n\
+	bgt $16,5b						\n\
+								\n\
+	ret							\n\
+	.end xor_alpha_prefetch_5				\n\
+");
+
+static struct xor_block_template xor_block_alpha = {
+	.name	= "alpha",
+	.do_2	= xor_alpha_2,
+	.do_3	= xor_alpha_3,
+	.do_4	= xor_alpha_4,
+	.do_5	= xor_alpha_5,
+};
+
+static struct xor_block_template xor_block_alpha_prefetch = {
+	.name	= "alpha prefetch",
+	.do_2	= xor_alpha_prefetch_2,
+	.do_3	= xor_alpha_prefetch_3,
+	.do_4	= xor_alpha_prefetch_4,
+	.do_5	= xor_alpha_prefetch_5,
+};
+
+/* For grins, also test the generic routines.  */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES				\
+	do {						\
+		xor_speed(&xor_block_8regs);		\
+		xor_speed(&xor_block_32regs);		\
+		xor_speed(&xor_block_alpha);		\
+		xor_speed(&xor_block_alpha_prefetch);	\
+	} while (0)
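+
+/* Illustrative expansion (editorial): the RAID calibration code
+   (drivers/md/xor.c) does roughly
+
+	XOR_TRY_TEMPLATES;
+	fastest = XOR_SELECT_TEMPLATE(fastest);
+
+   timing each template with xor_speed() and keeping the quickest,
+   unless the architecture overrides the choice as EV6 does below.  */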
+
+/* Force the use of alpha_prefetch if EV6, as it is significantly
+   faster in the cold cache case.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+	(implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST)