Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/include/asm-i386/8253pit.h b/include/asm-i386/8253pit.h
new file mode 100644
index 0000000..96c7c35
--- /dev/null
+++ b/include/asm-i386/8253pit.h
@@ -0,0 +1,12 @@
+/*
+ * 8253/8254 Programmable Interval Timer
+ */
+
+#ifndef _8253PIT_H
+#define _8253PIT_H
+
+#include <asm/timex.h>
+
+#define PIT_TICK_RATE 	CLOCK_TICK_RATE
+
+#endif
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
new file mode 100644
index 0000000..ab17bb8
--- /dev/null
+++ b/include/asm-i386/a.out.h
@@ -0,0 +1,26 @@
+#ifndef __I386_A_OUT_H__
+#define __I386_A_OUT_H__
+
+struct exec
+{
+  unsigned long a_info;		/* Use macros N_MAGIC, etc for access */
+  unsigned a_text;		/* length of text, in bytes */
+  unsigned a_data;		/* length of data, in bytes */
+  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
+  unsigned a_syms;		/* length of symbol table data in file, in bytes */
+  unsigned a_entry;		/* start address */
+  unsigned a_trsize;		/* length of relocation info for text, in bytes */
+  unsigned a_drsize;		/* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a)	((a).a_trsize)
+#define N_DRSIZE(a)	((a).a_drsize)
+#define N_SYMSIZE(a)	((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP	TASK_SIZE
+
+#endif
+
+#endif /* __I386_A_OUT_H__ */
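
A quick user-space illustration of the accessor macros above; the struct and
sample values are restated (and the values made up) so the snippet compiles
on its own:

    #include <stdio.h>

    struct exec {
            unsigned long a_info;
            unsigned a_text, a_data, a_bss, a_syms;
            unsigned a_entry, a_trsize, a_drsize;
    };

    #define N_TRSIZE(a)     ((a).a_trsize)
    #define N_DRSIZE(a)     ((a).a_drsize)
    #define N_SYMSIZE(a)    ((a).a_syms)

    int main(void)
    {
            /* Made-up header values, just to exercise the macros. */
            struct exec ex = { .a_text = 4096, .a_data = 512,
                               .a_syms = 256, .a_trsize = 64, .a_drsize = 32 };
            printf("trsize=%u drsize=%u syms=%u\n",
                   N_TRSIZE(ex), N_DRSIZE(ex), N_SYMSIZE(ex));
            return 0;
    }
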
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
new file mode 100644
index 0000000..c976c1d
--- /dev/null
+++ b/include/asm-i386/acpi.h
@@ -0,0 +1,190 @@
+/*
+ *  asm-i386/acpi.h
+ *
+ *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <asm/system.h>		/* defines cmpxchg */
+
+#define COMPILER_DEPENDENT_INT64   long long
+#define COMPILER_DEPENDENT_UINT64  unsigned long long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces 
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS() local_irq_disable()
+#define ACPI_ENABLE_IRQS()  local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE()	wbinvd()
+
+
+static inline int
+__acpi_acquire_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = cmpxchg(lock, old, new);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
+
+static inline int
+__acpi_release_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = cmpxchg(lock, old, new);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
+	((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
+	((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
+
+/*
+ * Math helper asm macros
+ */
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+        asm("divl %2;"        \
+        :"=a"(q32), "=d"(r32) \
+        :"r"(d32),            \
+        "0"(n_lo), "1"(n_hi))
+
+
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+    asm("shrl   $1,%2;"             \
+        "rcrl   $1,%3;"             \
+        :"=r"(n_hi), "=r"(n_lo)     \
+        :"0"(n_hi), "1"(n_lo))
+
+/*
+ * Refer Intel ACPI _PDC support document for bit definitions
+ */
+#define ACPI_PDC_EST_CAPABILITY_SMP 	0xa
+#define ACPI_PDC_EST_CAPABILITY_MSR	0x1
+
+#ifdef CONFIG_ACPI_BOOT 
+extern int acpi_lapic;
+extern int acpi_ioapic;
+extern int acpi_noirq;
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_ht;
+extern int acpi_pci_disabled;
+static inline void disable_acpi(void) 
+{ 
+	acpi_disabled = 1; 
+	acpi_ht = 0;
+	acpi_pci_disabled = 1;
+	acpi_noirq = 1;
+}
+
+/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
+#define FIX_ACPI_PAGES 4
+
+extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
+
+#ifdef CONFIG_X86_IO_APIC
+extern int skip_ioapic_setup;
+extern int acpi_skip_timer_override;
+
+extern void check_acpi_pci(void);
+
+static inline void disable_ioapic_setup(void)
+{
+	skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+	return skip_ioapic_setup;
+}
+
+#else
+static inline void disable_ioapic_setup(void) { }
+static inline void check_acpi_pci(void) { }
+
+#endif
+
+#else	/* CONFIG_ACPI_BOOT */
+#  define acpi_lapic 0
+#  define acpi_ioapic 0
+
+#endif
+
+#ifdef CONFIG_ACPI_PCI
+static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
+static inline void acpi_disable_pci(void) 
+{
+	acpi_pci_disabled = 1; 
+	acpi_noirq_set();
+}
+extern int acpi_irq_balance_set(char *str);
+#else
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+static inline int acpi_irq_balance_set(char *str) { return 0; }
+#endif
+
+#ifdef CONFIG_ACPI_SLEEP
+
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern void acpi_restore_state_mem(void);
+
+extern unsigned long acpi_wakeup_address;
+
+/* early initialization routine */
+extern void acpi_reserve_bootmem(void);
+
+#endif /*CONFIG_ACPI_SLEEP*/
+
+extern u8 x86_acpiid_to_apicid[];
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
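
In the global-lock dword manipulated above, bit 0 is the "pending" flag and
bit 1 the "owned" flag: the acquire path takes ownership if the lock is free,
otherwise it sets "pending" and reports failure. A stand-alone model of that
protocol (user space, with GCC's __sync builtin standing in for the kernel's
cmpxchg; not code from this patch):

    #include <stdio.h>

    static int acquire_global_lock(unsigned int *lock)
    {
            unsigned int old, new;
            do {
                    old = *lock;
                    /* Set "owned" (bit 1); if it was already owned,
                     * this sets "pending" (bit 0) instead. */
                    new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
            } while (__sync_val_compare_and_swap(lock, old, new) != old);
            return (new < 3) ? -1 : 0;  /* -1: acquired, 0: must wait */
    }

    int main(void)
    {
            unsigned int lock = 0;
            printf("first acquire:  %d (lock=%u)\n",
                   acquire_global_lock(&lock), lock);  /* -1, lock=2 */
            printf("second acquire: %d (lock=%u)\n",
                   acquire_global_lock(&lock), lock);  /*  0, lock=3 */
            return 0;
    }
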
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
new file mode 100644
index 0000000..a917ff5
--- /dev/null
+++ b/include/asm-i386/agp.h
@@ -0,0 +1,24 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+/* 
+ * Functions to keep the agpgart mappings coherent with the MMU.
+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cacheability attributes for the same page. This avoids
+ * data corruption on some CPUs.
+ */
+
+int map_page_into_agp(struct page *page);
+int unmap_page_from_agp(struct page *page);
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+   need to be called for each cacheline of the whole page so it may not be 
+   worth it. Would need a page for it. */
+#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+
+#endif
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
new file mode 100644
index 0000000..e1de674
--- /dev/null
+++ b/include/asm-i386/apic.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_APIC_H
+#define __ASM_APIC_H
+
+#include <linux/config.h>
+#include <linux/pm.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/system.h>
+
+#define Dprintk(x...)
+
+/*
+ * Debugging macros
+ */
+#define APIC_QUIET   0
+#define APIC_VERBOSE 1
+#define APIC_DEBUG   2
+
+extern int apic_verbosity;
+
+/*
+ * Define the default level of output to be very little
+ * This can be turned up by using apic=verbose for more
+ * information and apic=debug for _lots_ of information.
+ * apic_verbosity is defined in apic.c
+ */
+#define apic_printk(v, s, a...) do {       \
+		if ((v) <= apic_verbosity) \
+			printk(s, ##a);    \
+	} while (0)
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+/*
+ * Basic functions accessing APICs.
+ */
+
+static __inline void apic_write(unsigned long reg, unsigned long v)
+{
+	*((volatile unsigned long *)(APIC_BASE+reg)) = v;
+}
+
+static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+	xchg((volatile unsigned long *)(APIC_BASE+reg), v);
+}
+
+static __inline unsigned long apic_read(unsigned long reg)
+{
+	return *((volatile unsigned long *)(APIC_BASE+reg));
+}
+
+static __inline__ void apic_wait_icr_idle(void)
+{
+	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
+		cpu_relax();
+}
+
+int get_physical_broadcast(void);
+
+#ifdef CONFIG_X86_GOOD_APIC
+# define FORCE_READ_AROUND_WRITE 0
+# define apic_read_around(x)
+# define apic_write_around(x,y) apic_write((x),(y))
+#else
+# define FORCE_READ_AROUND_WRITE 1
+# define apic_read_around(x) apic_read(x)
+# define apic_write_around(x,y) apic_write_atomic((x),(y))
+#endif
+
+static inline void ack_APIC_irq(void)
+{
+	/*
+	 * ack_APIC_irq() actually gets compiled as a single instruction:
+	 * - a single rmw on Pentium/82489DX
+	 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+	 * ... yummie.
+	 */
+
+	/* Docs say use 0 for future compatibility */
+	apic_write_around(APIC_EOI, 0);
+}
+
+extern void (*wait_timer_tick)(void);
+
+extern int get_maxlvt(void);
+extern void clear_local_APIC(void);
+extern void connect_bsp_APIC (void);
+extern void disconnect_bsp_APIC (void);
+extern void disable_local_APIC (void);
+extern void lapic_shutdown (void);
+extern int verify_local_APIC (void);
+extern void cache_APIC_registers (void);
+extern void sync_Arb_IDs (void);
+extern void init_bsp_APIC (void);
+extern void setup_local_APIC (void);
+extern void init_apic_mappings (void);
+extern void smp_local_timer_interrupt (struct pt_regs * regs);
+extern void setup_boot_APIC_clock (void);
+extern void setup_secondary_APIC_clock (void);
+extern void setup_apic_nmi_watchdog (void);
+extern int reserve_lapic_nmi(void);
+extern void release_lapic_nmi(void);
+extern void disable_timer_nmi_watchdog(void);
+extern void enable_timer_nmi_watchdog(void);
+extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern int APIC_init_uniprocessor (void);
+extern void disable_APIC_timer(void);
+extern void enable_APIC_timer(void);
+
+extern int check_nmi_watchdog (void);
+extern void enable_NMI_through_LVT0 (void * dummy);
+
+extern unsigned int nmi_watchdog;
+#define NMI_NONE	0
+#define NMI_IO_APIC	1
+#define NMI_LOCAL_APIC	2
+#define NMI_INVALID	3
+
+#else /* !CONFIG_X86_LOCAL_APIC */
+static inline void lapic_shutdown(void) { }
+
+#endif /* !CONFIG_X86_LOCAL_APIC */
+
+#endif /* __ASM_APIC_H */
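
As a usage sketch (kernel context assumed; nothing here is part of the
patch), reading the version register through these accessors and decoding it
with the apicdef.h helpers looks like:

    static void report_lapic_version(void)
    {
            unsigned long v = apic_read(APIC_LVR);

            printk(KERN_INFO "local APIC version %#lx, max LVT %lu\n",
                   GET_APIC_VERSION(v), GET_APIC_MAXLVT(v));
    }
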
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
new file mode 100644
index 0000000..c689554
--- /dev/null
+++ b/include/asm-i386/apicdef.h
@@ -0,0 +1,377 @@
+#ifndef __ASM_APICDEF_H
+#define __ASM_APICDEF_H
+
+/*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+ * Alan Cox <Alan.Cox@linux.org>, 1995.
+ * Ingo Molnar <mingo@redhat.com>, 1999, 2000
+ */
+
+#define		APIC_DEFAULT_PHYS_BASE	0xfee00000
+ 
+#define		APIC_ID		0x20
+#define		APIC_LVR	0x30
+#define			APIC_LVR_MASK		0xFF00FF
+#define			GET_APIC_VERSION(x)	((x)&0xFF)
+#define			GET_APIC_MAXLVT(x)	(((x)>>16)&0xFF)
+#define			APIC_INTEGRATED(x)	((x)&0xF0)
+#define		APIC_TASKPRI	0x80
+#define			APIC_TPRI_MASK		0xFF
+#define		APIC_ARBPRI	0x90
+#define			APIC_ARBPRI_MASK	0xFF
+#define		APIC_PROCPRI	0xA0
+#define		APIC_EOI	0xB0
+#define			APIC_EIO_ACK		0x0		/* Write this to the EOI register */
+#define		APIC_RRR	0xC0
+#define		APIC_LDR	0xD0
+#define			APIC_LDR_MASK		(0xFF<<24)
+#define			GET_APIC_LOGICAL_ID(x)	(((x)>>24)&0xFF)
+#define			SET_APIC_LOGICAL_ID(x)	(((x)<<24))
+#define			APIC_ALL_CPUS		0xFF
+#define		APIC_DFR	0xE0
+#define			APIC_DFR_CLUSTER		0x0FFFFFFFul
+#define			APIC_DFR_FLAT			0xFFFFFFFFul
+#define		APIC_SPIV	0xF0
+#define			APIC_SPIV_FOCUS_DISABLED	(1<<9)
+#define			APIC_SPIV_APIC_ENABLED		(1<<8)
+#define		APIC_ISR	0x100
+#define		APIC_TMR	0x180
+#define 	APIC_IRR	0x200
+#define 	APIC_ESR	0x280
+#define			APIC_ESR_SEND_CS	0x00001
+#define			APIC_ESR_RECV_CS	0x00002
+#define			APIC_ESR_SEND_ACC	0x00004
+#define			APIC_ESR_RECV_ACC	0x00008
+#define			APIC_ESR_SENDILL	0x00020
+#define			APIC_ESR_RECVILL	0x00040
+#define			APIC_ESR_ILLREGA	0x00080
+#define		APIC_ICR	0x300
+#define			APIC_DEST_SELF		0x40000
+#define			APIC_DEST_ALLINC	0x80000
+#define			APIC_DEST_ALLBUT	0xC0000
+#define			APIC_ICR_RR_MASK	0x30000
+#define			APIC_ICR_RR_INVALID	0x00000
+#define			APIC_ICR_RR_INPROG	0x10000
+#define			APIC_ICR_RR_VALID	0x20000
+#define			APIC_INT_LEVELTRIG	0x08000
+#define			APIC_INT_ASSERT		0x04000
+#define			APIC_ICR_BUSY		0x01000
+#define			APIC_DEST_LOGICAL	0x00800
+#define			APIC_DM_FIXED		0x00000
+#define			APIC_DM_LOWEST		0x00100
+#define			APIC_DM_SMI		0x00200
+#define			APIC_DM_REMRD		0x00300
+#define			APIC_DM_NMI		0x00400
+#define			APIC_DM_INIT		0x00500
+#define			APIC_DM_STARTUP		0x00600
+#define			APIC_DM_EXTINT		0x00700
+#define			APIC_VECTOR_MASK	0x000FF
+#define		APIC_ICR2	0x310
+#define			GET_APIC_DEST_FIELD(x)	(((x)>>24)&0xFF)
+#define			SET_APIC_DEST_FIELD(x)	((x)<<24)
+#define		APIC_LVTT	0x320
+#define		APIC_LVTTHMR	0x330
+#define		APIC_LVTPC	0x340
+#define		APIC_LVT0	0x350
+#define			APIC_LVT_TIMER_BASE_MASK	(0x3<<18)
+#define			GET_APIC_TIMER_BASE(x)		(((x)>>18)&0x3)
+#define			SET_APIC_TIMER_BASE(x)		(((x)<<18))
+#define			APIC_TIMER_BASE_CLKIN		0x0
+#define			APIC_TIMER_BASE_TMBASE		0x1
+#define			APIC_TIMER_BASE_DIV		0x2
+#define			APIC_LVT_TIMER_PERIODIC		(1<<17)
+#define			APIC_LVT_MASKED			(1<<16)
+#define			APIC_LVT_LEVEL_TRIGGER		(1<<15)
+#define			APIC_LVT_REMOTE_IRR		(1<<14)
+#define			APIC_INPUT_POLARITY		(1<<13)
+#define			APIC_SEND_PENDING		(1<<12)
+#define			GET_APIC_DELIVERY_MODE(x)	(((x)>>8)&0x7)
+#define			SET_APIC_DELIVERY_MODE(x,y)	(((x)&~0x700)|((y)<<8))
+#define				APIC_MODE_FIXED		0x0
+#define				APIC_MODE_NMI		0x4
+#define				APIC_MODE_EXINT		0x7
+#define 	APIC_LVT1	0x360
+#define		APIC_LVTERR	0x370
+#define		APIC_TMICT	0x380
+#define		APIC_TMCCT	0x390
+#define		APIC_TDCR	0x3E0
+#define			APIC_TDR_DIV_TMBASE	(1<<2)
+#define			APIC_TDR_DIV_1		0xB
+#define			APIC_TDR_DIV_2		0x0
+#define			APIC_TDR_DIV_4		0x1
+#define			APIC_TDR_DIV_8		0x2
+#define			APIC_TDR_DIV_16		0x3
+#define			APIC_TDR_DIV_32		0x8
+#define			APIC_TDR_DIV_64		0x9
+#define			APIC_TDR_DIV_128	0xA
+
+#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+
+#ifdef CONFIG_NUMA
+ #define MAX_IO_APICS 32
+#else
+ #define MAX_IO_APICS 8
+#endif
+
+/*
+ * the local APIC register structure, memory mapped. Not terribly well
+ * tested, but we might eventually use this one in the future - the
+ * problem why we cannot use it right now is the P5 APIC, it has an
+ * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
+ */
+#define u32 unsigned int
+
+#define lapic ((volatile struct local_apic *)APIC_BASE)
+
+struct local_apic {
+
+/*000*/	struct { u32 __reserved[4]; } __reserved_01;
+
+/*010*/	struct { u32 __reserved[4]; } __reserved_02;
+
+/*020*/	struct { /* APIC ID Register */
+		u32   __reserved_1	: 24,
+			phys_apic_id	:  4,
+			__reserved_2	:  4;
+		u32 __reserved[3];
+	} id;
+
+/*030*/	const
+	struct { /* APIC Version Register */
+		u32   version		:  8,
+			__reserved_1	:  8,
+			max_lvt		:  8,
+			__reserved_2	:  8;
+		u32 __reserved[3];
+	} version;
+
+/*040*/	struct { u32 __reserved[4]; } __reserved_03;
+
+/*050*/	struct { u32 __reserved[4]; } __reserved_04;
+
+/*060*/	struct { u32 __reserved[4]; } __reserved_05;
+
+/*070*/	struct { u32 __reserved[4]; } __reserved_06;
+
+/*080*/	struct { /* Task Priority Register */
+		u32   priority	:  8,
+			__reserved_1	: 24;
+		u32 __reserved_2[3];
+	} tpr;
+
+/*090*/	const
+	struct { /* Arbitration Priority Register */
+		u32   priority	:  8,
+			__reserved_1	: 24;
+		u32 __reserved_2[3];
+	} apr;
+
+/*0A0*/	const
+	struct { /* Processor Priority Register */
+		u32   priority	:  8,
+			__reserved_1	: 24;
+		u32 __reserved_2[3];
+	} ppr;
+
+/*0B0*/	struct { /* End Of Interrupt Register */
+		u32   eoi;
+		u32 __reserved[3];
+	} eoi;
+
+/*0C0*/	struct { u32 __reserved[4]; } __reserved_07;
+
+/*0D0*/	struct { /* Logical Destination Register */
+		u32   __reserved_1	: 24,
+			logical_dest	:  8;
+		u32 __reserved_2[3];
+	} ldr;
+
+/*0E0*/	struct { /* Destination Format Register */
+		u32   __reserved_1	: 28,
+			model		:  4;
+		u32 __reserved_2[3];
+	} dfr;
+
+/*0F0*/	struct { /* Spurious Interrupt Vector Register */
+		u32	spurious_vector	:  8,
+			apic_enabled	:  1,
+			focus_cpu	:  1,
+			__reserved_2	: 22;
+		u32 __reserved_3[3];
+	} svr;
+
+/*100*/	struct { /* In Service Register */
+/*170*/		u32 bitfield;
+		u32 __reserved[3];
+	} isr [8];
+
+/*180*/	struct { /* Trigger Mode Register */
+/*1F0*/		u32 bitfield;
+		u32 __reserved[3];
+	} tmr [8];
+
+/*200*/	struct { /* Interrupt Request Register */
+/*270*/		u32 bitfield;
+		u32 __reserved[3];
+	} irr [8];
+
+/*280*/	union { /* Error Status Register */
+		struct {
+			u32   send_cs_error			:  1,
+				receive_cs_error		:  1,
+				send_accept_error		:  1,
+				receive_accept_error		:  1,
+				__reserved_1			:  1,
+				send_illegal_vector		:  1,
+				receive_illegal_vector		:  1,
+				illegal_register_address	:  1,
+				__reserved_2			: 24;
+			u32 __reserved_3[3];
+		} error_bits;
+		struct {
+			u32 errors;
+			u32 __reserved_3[3];
+		} all_errors;
+	} esr;
+
+/*290*/	struct { u32 __reserved[4]; } __reserved_08;
+
+/*2A0*/	struct { u32 __reserved[4]; } __reserved_09;
+
+/*2B0*/	struct { u32 __reserved[4]; } __reserved_10;
+
+/*2C0*/	struct { u32 __reserved[4]; } __reserved_11;
+
+/*2D0*/	struct { u32 __reserved[4]; } __reserved_12;
+
+/*2E0*/	struct { u32 __reserved[4]; } __reserved_13;
+
+/*2F0*/	struct { u32 __reserved[4]; } __reserved_14;
+
+/*300*/	struct { /* Interrupt Command Register 1 */
+		u32   vector			:  8,
+			delivery_mode		:  3,
+			destination_mode	:  1,
+			delivery_status		:  1,
+			__reserved_1		:  1,
+			level			:  1,
+			trigger			:  1,
+			__reserved_2		:  2,
+			shorthand		:  2,
+			__reserved_3		:  12;
+		u32 __reserved_4[3];
+	} icr1;
+
+/*310*/	struct { /* Interrupt Command Register 2 */
+		union {
+			u32   __reserved_1	: 24,
+				phys_dest	:  4,
+				__reserved_2	:  4;
+			u32   __reserved_3	: 24,
+				logical_dest	:  8;
+		} dest;
+		u32 __reserved_4[3];
+	} icr2;
+
+/*320*/	struct { /* LVT - Timer */
+		u32   vector		:  8,
+			__reserved_1	:  4,
+			delivery_status	:  1,
+			__reserved_2	:  3,
+			mask		:  1,
+			timer_mode	:  1,
+			__reserved_3	: 14;
+		u32 __reserved_4[3];
+	} lvt_timer;
+
+/*330*/	struct { /* LVT - Thermal Sensor */
+		u32  vector		:  8,
+			delivery_mode	:  3,
+			__reserved_1	:  1,
+			delivery_status	:  1,
+			__reserved_2	:  3,
+			mask		:  1,
+			__reserved_3	: 15;
+		u32 __reserved_4[3];
+	} lvt_thermal;
+
+/*340*/	struct { /* LVT - Performance Counter */
+		u32   vector		:  8,
+			delivery_mode	:  3,
+			__reserved_1	:  1,
+			delivery_status	:  1,
+			__reserved_2	:  3,
+			mask		:  1,
+			__reserved_3	: 15;
+		u32 __reserved_4[3];
+	} lvt_pc;
+
+/*350*/	struct { /* LVT - LINT0 */
+		u32   vector		:  8,
+			delivery_mode	:  3,
+			__reserved_1	:  1,
+			delivery_status	:  1,
+			polarity	:  1,
+			remote_irr	:  1,
+			trigger		:  1,
+			mask		:  1,
+			__reserved_2	: 15;
+		u32 __reserved_3[3];
+	} lvt_lint0;
+
+/*360*/	struct { /* LVT - LINT1 */
+		u32   vector		:  8,
+			delivery_mode	:  3,
+			__reserved_1	:  1,
+			delivery_status	:  1,
+			polarity	:  1,
+			remote_irr	:  1,
+			trigger		:  1,
+			mask		:  1,
+			__reserved_2	: 15;
+		u32 __reserved_3[3];
+	} lvt_lint1;
+
+/*370*/	struct { /* LVT - Error */
+		u32   vector		:  8,
+			__reserved_1	:  4,
+			delivery_status	:  1,
+			__reserved_2	:  3,
+			mask		:  1,
+			__reserved_3	: 15;
+		u32 __reserved_4[3];
+	} lvt_error;
+
+/*380*/	struct { /* Timer Initial Count Register */
+		u32   initial_count;
+		u32 __reserved_2[3];
+	} timer_icr;
+
+/*390*/	const
+	struct { /* Timer Current Count Register */
+		u32   curr_count;
+		u32 __reserved_2[3];
+	} timer_ccr;
+
+/*3A0*/	struct { u32 __reserved[4]; } __reserved_16;
+
+/*3B0*/	struct { u32 __reserved[4]; } __reserved_17;
+
+/*3C0*/	struct { u32 __reserved[4]; } __reserved_18;
+
+/*3D0*/	struct { u32 __reserved[4]; } __reserved_19;
+
+/*3E0*/	struct { /* Timer Divide Configuration Register */
+		u32   divisor		:  4,
+			__reserved_1	: 28;
+		u32 __reserved_2[3];
+	} timer_dcr;
+
+/*3F0*/	struct { u32 __reserved[4]; } __reserved_20;
+
+} __attribute__ ((packed));
+
+#undef u32
+
+#endif
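
A stand-alone demonstration of the ICR helpers; the handful of macros used
are copied verbatim from above so it compiles by itself, and the vector and
APIC id are made up for the example:

    #include <stdio.h>

    #define APIC_DM_STARTUP         0x00600
    #define APIC_INT_ASSERT         0x04000
    #define APIC_VECTOR_MASK        0x000FF
    #define SET_APIC_DEST_FIELD(x)  ((x)<<24)
    #define GET_APIC_DEST_FIELD(x)  (((x)>>24)&0xFF)

    int main(void)
    {
            /* Startup IPI for vector 0x9A aimed at APIC id 1. */
            unsigned long icr  = APIC_INT_ASSERT | APIC_DM_STARTUP |
                                 (0x9A & APIC_VECTOR_MASK);
            unsigned long icr2 = SET_APIC_DEST_FIELD(1UL);

            printf("ICR=%#lx ICR2=%#lx dest=%lu\n",
                   icr, icr2, GET_APIC_DEST_FIELD(icr2));
            return 0;
    }
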
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
new file mode 100644
index 0000000..28b96a6
--- /dev/null
+++ b/include/asm-i386/arch_hooks.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_ARCH_HOOKS_H
+#define _ASM_ARCH_HOOKS_H
+
+#include <linux/interrupt.h>
+
+/*
+ *	linux/include/asm/arch_hooks.h
+ *
+ *	define the architecture-specific hooks
+ */
+
+/* these aren't arch hooks; they are generic routines
+ * that can be used by the hooks */
+extern void init_ISA_irqs(void);
+extern void apic_intr_init(void);
+extern void smp_intr_init(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/* these are the defined hooks */
+extern void intr_init_hook(void);
+extern void pre_intr_init_hook(void);
+extern void pre_setup_arch_hook(void);
+extern void trap_init_hook(void);
+extern void time_init_hook(void);
+extern void mca_nmi_hook(void);
+
+#endif
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
new file mode 100644
index 0000000..509720b
--- /dev/null
+++ b/include/asm-i386/atomic.h
@@ -0,0 +1,236 @@
+#ifndef __ARCH_I386_ATOMIC__
+#define __ARCH_I386_ATOMIC__
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <asm/processor.h>
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#ifdef CONFIG_SMP
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically reads the value of @v.
+ */ 
+#define atomic_read(v)		((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ * 
+ * Atomically sets the value of @v to @i.
+ */ 
+#define atomic_set(v,i)		(((v)->counter) = (i))
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically adds @i to @v.
+ */
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "addl %1,%0"
+		:"=m" (v->counter)
+		:"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically subtracts @i from @v.
+ */
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "subl %1,%0"
+		:"=m" (v->counter)
+		:"ir" (i), "m" (v->counter));
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		LOCK "subl %2,%0; sete %1"
+		:"=m" (v->counter), "=qm" (c)
+		:"ir" (i), "m" (v->counter) : "memory");
+	return c;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically increments @v by 1.
+ */ 
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "incl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically decrements @v by 1.
+ */ 
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__(
+		LOCK "decl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */ 
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		LOCK "decl %0; sete %1"
+		:"=m" (v->counter), "=qm" (c)
+		:"m" (v->counter) : "memory");
+	return c != 0;
+}
+
+/**
+ * atomic_inc_and_test - increment and test 
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */ 
+static __inline__ int atomic_inc_and_test(atomic_t *v)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		LOCK "incl %0; sete %1"
+		:"=m" (v->counter), "=qm" (c)
+		:"m" (v->counter) : "memory");
+	return c != 0;
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ * 
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */ 
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		LOCK "addl %2,%0; sets %1"
+		:"=m" (v->counter), "=qm" (c)
+		:"ir" (i), "m" (v->counter) : "memory");
+	return c;
+}
+
+/**
+ * atomic_add_return - add and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int __i;
+#ifdef CONFIG_M386
+	if(unlikely(boot_cpu_data.x86==3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor */
+	__i = i;
+	__asm__ __volatile__(
+		LOCK "xaddl %0, %1;"
+		:"=r"(i)
+		:"m"(v->counter), "0"(i));
+	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_disable();
+	__i = atomic_read(v);
+	atomic_set(v, i + __i);
+	local_irq_enable();
+	return i + __i;
+#endif
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v)  (atomic_add_return(1,v))
+#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+__asm__ __volatile__(LOCK "andl %0,%1" \
+: : "r" (~(mask)),"m" (*addr) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+__asm__ __volatile__(LOCK "orl %0,%1" \
+: : "r" (mask),"m" (*(addr)) : "memory")
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif
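
The canonical consumer of these primitives is reference counting. A hedged
sketch follows (my_object and free_object() are hypothetical, kernel context
assumed):

    struct my_object {
            atomic_t refcount;
            /* ... payload ... */
    };

    static void free_object(struct my_object *obj);     /* hypothetical */

    static void get_object(struct my_object *obj)
    {
            atomic_inc(&obj->refcount);
    }

    static void put_object(struct my_object *obj)
    {
            /* Only the caller that drops the count to zero sees true,
             * so exactly one path frees the object. */
            if (atomic_dec_and_test(&obj->refcount))
                    free_object(obj);
    }
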
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
new file mode 100644
index 0000000..9db0b712
--- /dev/null
+++ b/include/asm-i386/bitops.h
@@ -0,0 +1,462 @@
+#ifndef _I386_BITOPS_H
+#define _I386_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__( LOCK_PREFIX
+		"btsl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__(
+		"btsl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__( LOCK_PREFIX
+		"btrl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__(
+		"btrl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__(
+		"btcl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__( LOCK_PREFIX
+		"btcl %1,%0"
+		:"=m" (ADDR)
+		:"Ir" (nr));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__( LOCK_PREFIX
+		"btsl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__(
+		"btsl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr));
+	return oldbit;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__( LOCK_PREFIX
+		"btrl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.  
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__(
+		"btrl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr));
+	return oldbit;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		"btcl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__( LOCK_PREFIX
+		"btcl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit),"=m" (ADDR)
+		:"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static int test_bit(int nr, const volatile void * addr);
+#endif
+
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
+}
+
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__(
+		"btl %2,%1\n\tsbbl %0,%0"
+		:"=r" (oldbit)
+		:"m" (ADDR),"Ir" (nr));
+	return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+#undef ADDR
+
+/**
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
+static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
+{
+	int d0, d1, d2;
+	int res;
+
+	if (!size)
+		return 0;
+	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
+	__asm__ __volatile__(
+		"movl $-1,%%eax\n\t"
+		"xorl %%edx,%%edx\n\t"
+		"repe; scasl\n\t"
+		"je 1f\n\t"
+		"xorl -4(%%edi),%%eax\n\t"
+		"subl $4,%%edi\n\t"
+		"bsfl %%eax,%%edx\n"
+		"1:\tsubl %%ebx,%%edi\n\t"
+		"shll $3,%%edi\n\t"
+		"addl %%edi,%%edx"
+		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+	return res;
+}
+
+/**
+ * find_next_zero_bit - find the next zero bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+int find_next_zero_bit(const unsigned long *addr, int size, int offset);
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+static inline int find_first_bit(const unsigned long *addr, unsigned size)
+{
+	int d0, d1;
+	int res;
+
+	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
+	__asm__ __volatile__(
+		"xorl %%eax,%%eax\n\t"
+		"repe; scasl\n\t"
+		"jz 1f\n\t"
+		"leal -4(%%edi),%%edi\n\t"
+		"bsfl (%%edi),%%eax\n"
+		"1:\tsubl %%ebx,%%edi\n\t"
+		"shll $3,%%edi\n\t"
+		"addl %%edi,%%eax"
+		:"=a" (res), "=&c" (d0), "=&D" (d1)
+		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+	return res;
+}
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+int find_next_bit(const unsigned long *addr, int size, int offset);
+
+/**
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+	__asm__("bsfl %1,%0"
+		:"=r" (word)
+		:"r" (~word));
+	return word;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+	__asm__("bsfl %1,%0"
+		:"=r" (word)
+		:"rm" (word));
+	return word;
+}
+
+/*
+ * fls: find last bit set.
+ */
+
+#define fls(x) generic_fls(x)
+
+#ifdef __KERNEL__
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+	int r;
+
+	__asm__("bsfl %1,%0\n\t"
+		"jnz 1f\n\t"
+		"movl $-1,%0\n"
+		"1:" : "=r" (r) : "rm" (x));
+	return r+1;
+}
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+
+#define ext2_set_bit(nr,addr) \
+	__test_and_set_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock,nr,addr) \
+        test_and_set_bit((nr),(unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+	__test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock,nr, addr) \
+	        test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_test_bit(nr, addr)      test_bit((nr),(unsigned long*)addr)
+#define ext2_find_first_zero_bit(addr, size) \
+	find_first_zero_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+	find_next_zero_bit((unsigned long*)addr, size, off)
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
+#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
+#define minix_find_first_zero_bit(addr,size) \
+	find_first_zero_bit((void*)addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_BITOPS_H */
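
A typical consumer of find_first_zero_bit() plus test_and_set_bit() is slot
allocation from a shared bitmap, the same pattern the kernel uses for things
like fd bitmaps. A hedged sketch (MAX_SLOTS and slot_map are hypothetical):

    #define MAX_SLOTS 128
    static unsigned long slot_map[MAX_SLOTS / 32];  /* 32-bit longs on i386 */

    static int alloc_slot(void)
    {
            int slot;

            do {
                    slot = find_first_zero_bit(slot_map, MAX_SLOTS);
                    if (slot >= MAX_SLOTS)
                            return -1;      /* all slots taken */
                    /* Another CPU may have raced us to this bit; the
                     * atomic test-and-set tells us, and we retry. */
            } while (test_and_set_bit(slot, slot_map));

            return slot;
    }

    static void free_slot(int slot)
    {
            clear_bit(slot, slot_map);
    }
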
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
new file mode 100644
index 0000000..96b228e
--- /dev/null
+++ b/include/asm-i386/boot.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_BOOT_H
+#define _LINUX_BOOT_H
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG	0x9000
+#define DEF_SYSSEG	0x1000
+#define DEF_SETUPSEG	0x9020
+#define DEF_SYSSIZE	0x7F00
+
+/* Internal svga startup constants */
+#define NORMAL_VGA	0xffff		/* 80x25 mode */
+#define EXTENDED_VGA	0xfffe		/* 80x50 mode */
+#define ASK_VGA		0xfffd		/* ask for it at bootup */
+
+#endif
diff --git a/include/asm-i386/bug.h b/include/asm-i386/bug.h
new file mode 100644
index 0000000..706eb51
--- /dev/null
+++ b/include/asm-i386/bug.h
@@ -0,0 +1,25 @@
+#ifndef _I386_BUG_H
+#define _I386_BUG_H
+
+#include <linux/config.h>
+
+/*
+ * Tell the user there is some problem.
+ * The offending file and line are encoded after the "officially
+ * undefined" opcode for parsing in the trap handler.
+ */
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define BUG()				\
+ __asm__ __volatile__(	"ud2\n"		\
+			"\t.word %c0\n"	\
+			"\t.long %c1\n"	\
+			 : : "i" (__LINE__), "i" (__FILE__))
+#else
+#define BUG() __asm__ __volatile__("ud2\n")
+#endif
+
+#define HAVE_ARCH_BUG
+#include <asm-generic/bug.h>
+
+#endif
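
To see how the encoding is consumed, here is a sketch of the decode side,
modeled loosely on the i386 die() path (simplified, kernel context assumed;
0x0b0f is the ud2 opcode 0x0f 0x0b read as a little-endian word):

    static void report_bug(unsigned long eip)
    {
            unsigned short ud2, line;
            char *file;

            if (__get_user(ud2, (unsigned short *)eip) || ud2 != 0x0b0f)
                    return;                 /* not a BUG() site */
            if (__get_user(line, (unsigned short *)(eip + 2)))
                    return;
            if (__get_user(file, (char **)(eip + 4)))
                    return;
            printk("kernel BUG at %s:%d!\n", file, line);
    }
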
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
new file mode 100644
index 0000000..6789fc2
--- /dev/null
+++ b/include/asm-i386/bugs.h
@@ -0,0 +1,213 @@
+/*
+ *  include/asm-i386/bugs.h
+ *
+ *  Copyright (C) 1994  Linus Torvalds
+ *
+ *  Cyrix stuff, June 1998 by:
+ *	- Rafael R. Reilova (moved everything from head.S),
+ *        <rreilova@ececs.uc.edu>
+ *	- Channing Corn (tests & fixes),
+ *	- Andrew D. Balsa (code cleanup).
+ *
+ *  Pentium III FXSR, SSE support
+ *	Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+
+static int __init no_halt(char *s)
+{
+	boot_cpu_data.hlt_works_ok = 0;
+	return 1;
+}
+
+__setup("no-hlt", no_halt);
+
+static int __init mca_pentium(char *s)
+{
+	mca_pentium_flag = 1;
+	return 1;
+}
+
+__setup("mca-pentium", mca_pentium);
+
+static int __init no_387(char *s)
+{
+	boot_cpu_data.hard_math = 0;
+	write_cr0(0xE | read_cr0());
+	return 1;
+}
+
+__setup("no387", no_387);
+
+static double __initdata x = 4195835.0;
+static double __initdata y = 3145727.0;
+
+/*
+ * This used to check for exceptions.. 
+ * However, it turns out that to support that,
+ * the XMM trap handlers basically had to
+ * be buggy. So let's have a correct XMM trap
+ * handler, and forget about printing out
+ * some status at boot.
+ *
+ * We should really only care about bugs here
+ * anyway. Not features.
+ */
+static void __init check_fpu(void)
+{
+	if (!boot_cpu_data.hard_math) {
+#ifndef CONFIG_MATH_EMULATION
+		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
+		printk(KERN_EMERG "Giving up.\n");
+		for (;;) ;
+#endif
+		return;
+	}
+
+	/* Enable FXSR and company _before_ testing for FP problems. */
+	/*
+	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+	 */
+	if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+		extern void __buggy_fxsr_alignment(void);
+		__buggy_fxsr_alignment();
+	}
+	if (cpu_has_fxsr) {
+		printk(KERN_INFO "Enabling fast FPU save and restore... ");
+		set_in_cr4(X86_CR4_OSFXSR);
+		printk("done.\n");
+	}
+	if (cpu_has_xmm) {
+		printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
+		set_in_cr4(X86_CR4_OSXMMEXCPT);
+		printk("done.\n");
+	}
+
+	/* Test for the divl bug.. */
+	__asm__("fninit\n\t"
+		"fldl %1\n\t"
+		"fdivl %2\n\t"
+		"fmull %2\n\t"
+		"fldl %1\n\t"
+		"fsubp %%st,%%st(1)\n\t"
+		"fistpl %0\n\t"
+		"fwait\n\t"
+		"fninit"
+		: "=m" (*&boot_cpu_data.fdiv_bug)
+		: "m" (*&x), "m" (*&y));
+	if (boot_cpu_data.fdiv_bug)
+		printk("Hmm, FPU with FDIV bug.\n");
+}
+
+static void __init check_hlt(void)
+{
+	printk(KERN_INFO "Checking 'hlt' instruction... ");
+	if (!boot_cpu_data.hlt_works_ok) {
+		printk("disabled\n");
+		return;
+	}
+	__asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
+	printk("OK.\n");
+}
+
+/*
+ *	Most 386 processors have a bug where a POPAD can lock the 
+ *	machine even from user space.
+ */
+ 
+static void __init check_popad(void)
+{
+#ifndef CONFIG_X86_POPAD_OK
+	int res, inp = (int) &res;
+
+	printk(KERN_INFO "Checking for popad bug... ");
+	__asm__ __volatile__( 
+	  "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
+	  : "=&a" (res)
+	  : "d" (inp)
+	  : "ecx", "edi" );
+	/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
+	if (res != 12345678)
+		printk("Buggy.\n");
+	else
+		printk("OK.\n");
+#endif
+}
+
+/*
+ * Check whether we are able to run this kernel safely on SMP.
+ *
+ * - In order to run on an i386, we need to be compiled for i386
+ *   (due to the lack of "invlpg" and working WP on an i386)
+ * - In order to run on anything without a TSC, we need to be
+ *   compiled for an i486.
+ * - In order to support the local APIC on a buggy Pentium machine,
+ *   we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
+ *   which happens implicitly if compiled for a Pentium or lower
+ *   (unless an advanced selection of CPU features is used), since
+ *   such a config otherwise implies a properly working local APIC
+ *   without the need to do extra reads from the APIC.
+ */
+
+static void __init check_config(void)
+{
+/*
+ * We'd better not be a i386 if we're configured to use some
+ * i486+ only features! (WP works in supervisor mode and the
+ * new "invlpg" and "bswap" instructions)
+ */
+#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
+	if (boot_cpu_data.x86 == 3)
+		panic("Kernel requires i486+ for 'invlpg' and other features");
+#endif
+
+/*
+ * If we configured ourselves for a TSC, we'd better have one!
+ */
+#ifdef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		panic("Kernel compiled for Pentium+, requires TSC feature!");
+#endif
+
+/*
+ * If we were told we had a good local APIC, check for buggy Pentia,
+ * i.e. all B steppings and the C2 stepping of P54C when using their
+ * integrated APIC (see 11AP erratum in "Pentium Processor
+ * Specification Update").
+ */
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
+	    && cpu_has_apic
+	    && boot_cpu_data.x86 == 5
+	    && boot_cpu_data.x86_model == 2
+	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
+		panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
+#endif
+}
+
+extern void alternative_instructions(void);
+
+static void __init check_bugs(void)
+{
+	identify_cpu(&boot_cpu_data);
+#ifndef CONFIG_SMP
+	printk("CPU: ");
+	print_cpu_info(&boot_cpu_data);
+#endif
+	check_config();
+	check_fpu();
+	check_hlt();
+	check_popad();
+	system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+	alternative_instructions(); 
+}
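
The constants x and y above are the classic Pentium FDIV probe: dividing and
re-multiplying should give back the dividend, and on an affected FPU the
residue is roughly 256. The same check as a stand-alone user-space program
(a plain-C model of check_fpu()'s test, not the x87 asm itself):

    #include <stdio.h>

    int main(void)
    {
            volatile double x = 4195835.0, y = 3145727.0;
            double residue = x - (x / y) * y;

            /* ~0 on a correct FPU, roughly 256 with the FDIV erratum. */
            printf("FDIV residue: %f (%s)\n", residue,
                   residue < 1.0 ? "OK" : "buggy FDIV");
            return 0;
    }
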
diff --git a/include/asm-i386/byteorder.h b/include/asm-i386/byteorder.h
new file mode 100644
index 0000000..a0d73f4
--- /dev/null
+++ b/include/asm-i386/byteorder.h
@@ -0,0 +1,59 @@
+#ifndef _I386_BYTEORDER_H
+#define _I386_BYTEORDER_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+
+/* For avoiding bswap on i386 */
+#ifdef __KERNEL__
+#include <linux/config.h>
+#endif
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+#ifdef CONFIG_X86_BSWAP
+	__asm__("bswap %0" : "=r" (x) : "0" (x));
+#else
+	__asm__("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
+		"rorl $16,%0\n\t"	/* swap words		*/
+		"xchgb %b0,%h0"		/* swap higher bytes	*/
+		:"=q" (x)
+		: "0" (x));
+#endif
+	return x;
+}
+
+static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{ 
+	union { 
+		struct { __u32 a,b; } s;
+		__u64 u;
+	} v;
+	v.u = val;
+#ifdef CONFIG_X86_BSWAP
+	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 
+	    : "=r" (v.s.a), "=r" (v.s.b) 
+	    : "0" (v.s.a), "1" (v.s.b)); 
+#else
+	v.s.a = ___arch__swab32(v.s.a);
+	v.s.b = ___arch__swab32(v.s.b); 
+	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+#endif
+	return v.u;	
+} 
+
+/* Do not define swab16.  GCC is smart enough to recognize the "C" version
+   and convert it into a rotation or exchange.  */
+
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _I386_BYTEORDER_H */
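
For reference, the same swaps in portable C, with a stand-alone check of the
expected results (the header's asm versions must agree with this plain-C
model):

    #include <stdio.h>

    typedef unsigned int u32;
    typedef unsigned long long u64;

    static u32 swab32(u32 x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    static u64 swab64(u64 x)
    {
            /* Swap each half, then exchange the halves. */
            return ((u64)swab32((u32)x) << 32) | swab32((u32)(x >> 32));
    }

    int main(void)
    {
            printf("%08x -> %08x\n", 0x12345678u, swab32(0x12345678u));
            printf("%016llx -> %016llx\n", 0x0123456789abcdefULL,
                   swab64(0x0123456789abcdefULL));
            return 0;   /* expect 78563412 and efcdab8967452301 */
    }
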
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
new file mode 100644
index 0000000..8497887
--- /dev/null
+++ b/include/asm-i386/cache.h
@@ -0,0 +1,15 @@
+/*
+ * include/asm-i386/cache.h
+ */
+#ifndef __ARCH_I386_CACHE_H
+#define __ARCH_I386_CACHE_H
+
+#include <linux/config.h>
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
+
+#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
+
+#endif
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
new file mode 100644
index 0000000..2ea36de
--- /dev/null
+++ b/include/asm-i386/cacheflush.h
@@ -0,0 +1,34 @@
+#ifndef _I386_CACHEFLUSH_H
+#define _I386_CACHEFLUSH_H
+
+/* Keep includes the same across arches.  */
+#include <linux/mm.h>
+
+/* Caches aren't brain-dead on Intel. */
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_range(start, end)		do { } while (0)
+#define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+void global_flush_tlb(void); 
+int change_page_attr(struct page *page, int numpages, pgprot_t prot);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
+#endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h
new file mode 100644
index 0000000..d76a5f0
--- /dev/null
+++ b/include/asm-i386/checksum.h
@@ -0,0 +1,192 @@
+#ifndef _I386_CHECKSUM_H
+#define _I386_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst,
+						  int len, int sum, int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ *	Note: when you get a NULL pointer exception here this means someone
+ *	passed in an incorrect kernel address to one of these functions.
+ *
+ *	If you use these functions directly please don't forget the
+ *	verify_area().
+ */
+static __inline__
+unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
+					int len, int sum)
+{
+	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+}
+
+static __inline__
+unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
+						int len, int sum, int *err_ptr)
+{
+	might_sleep();
+	return csum_partial_copy_generic((__force unsigned char *)src, dst,
+					len, sum, err_ptr, NULL);
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which are always checksummed on 4-octet boundaries.
+ *
+ *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ *	Arnt Gulbrandsen.
+ */
+static inline unsigned short ip_fast_csum(unsigned char * iph,
+					  unsigned int ihl)
+{
+	unsigned int sum;
+
+	__asm__ __volatile__(
+	    "movl (%1), %0	;\n"
+	    "subl $4, %2	;\n"
+	    "jbe 2f		;\n"
+	    "addl 4(%1), %0	;\n"
+	    "adcl 8(%1), %0	;\n"
+	    "adcl 12(%1), %0	;\n"
+"1:	    adcl 16(%1), %0	;\n"
+	    "lea 4(%1), %1	;\n"
+	    "decl %2		;\n"
+	    "jne 1b		;\n"
+	    "adcl $0, %0	;\n"
+	    "movl %0, %2	;\n"
+	    "shrl $16, %0	;\n"
+	    "addw %w2, %w0	;\n"
+	    "adcl $0, %0	;\n"
+	    "notl %0		;\n"
+"2:				;\n"
+	/* Since the input registers which are loaded with iph and ihl
+	   are modified, we must also specify them as outputs, or gcc
+	   will assume they contain their original values. */
+	: "=r" (sum), "=r" (iph), "=r" (ihl)
+	: "1" (iph), "2" (ihl)
+	: "memory");
+	return(sum);
+}
+
+/*
+ *	Fold a partial checksum
+ */
+
+static inline unsigned int csum_fold(unsigned int sum)
+{
+	__asm__(
+		"addl %1, %0		;\n"
+		"adcl $0xffff, %0	;\n"
+		: "=r" (sum)
+		: "r" (sum << 16), "0" (sum & 0xffff0000)
+	);
+	return (~sum) >> 16;
+}
+
+static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
+						   unsigned long daddr,
+						   unsigned short len,
+						   unsigned short proto,
+						   unsigned int sum)
+{
+    __asm__(
+	"addl %1, %0	;\n"
+	"adcl %2, %0	;\n"
+	"adcl %3, %0	;\n"
+	"adcl $0, %0	;\n"
+	: "=r" (sum)
+	: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+    return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+						   unsigned long daddr,
+						   unsigned short len,
+						   unsigned short proto,
+						   unsigned int sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+    return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+						     struct in6_addr *daddr,
+						     __u32 len,
+						     unsigned short proto,
+						     unsigned int sum)
+{
+	__asm__(
+		"addl 0(%1), %0		;\n"
+		"adcl 4(%1), %0		;\n"
+		"adcl 8(%1), %0		;\n"
+		"adcl 12(%1), %0	;\n"
+		"adcl 0(%2), %0		;\n"
+		"adcl 4(%2), %0		;\n"
+		"adcl 8(%2), %0		;\n"
+		"adcl 12(%2), %0	;\n"
+		"adcl %3, %0		;\n"
+		"adcl %4, %0		;\n"
+		"adcl $0, %0		;\n"
+		: "=&r" (sum)
+		: "r" (saddr), "r" (daddr),
+		  "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
+
+	return csum_fold(sum);
+}
+
+/*
+ *	Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
+						     unsigned char __user *dst,
+						     int len, int sum, 
+						     int *err_ptr)
+{
+	might_sleep();
+	if (access_ok(VERIFY_WRITE, dst, len))
+		return csum_partial_copy_generic(src, (__force unsigned char *)dst, len, sum, NULL, err_ptr);
+
+	if (len)
+		*err_ptr = -EFAULT;
+
+	return -1; /* invalid checksum */
+}
+
+#endif
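
The folding in csum_fold() is ordinary RFC 1071 one's-complement arithmetic.
A portable reference model, using the well-known example IPv4 header from the
checksum literature as a self-check (expected result 0xb861):

    #include <stdio.h>

    static unsigned short fold(unsigned int sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* fold carries once */
            sum += sum >> 16;                       /* and any final carry */
            return (unsigned short)~sum;
    }

    static unsigned short ip_checksum(const unsigned short *data, int words)
    {
            unsigned int sum = 0;
            while (words--)
                    sum += *data++;
            return fold(sum);
    }

    int main(void)
    {
            /* Sample IPv4 header, checksum field zeroed at index 5. */
            unsigned short hdr[] = { 0x4500, 0x0073, 0x0000, 0x4000,
                                     0x4011, 0x0000, 0xc0a8, 0x0001,
                                     0xc0a8, 0x00c7 };
            printf("checksum: %#06x\n", ip_checksum(hdr, 10));
            return 0;   /* prints 0xb861 */
    }
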
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
new file mode 100644
index 0000000..002740b
--- /dev/null
+++ b/include/asm-i386/cpu.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_I386_CPU_H_
+#define _ASM_I386_CPU_H_
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+
+#include <asm/node.h>
+
+struct i386_cpu {
+	struct cpu cpu;
+};
+extern int arch_register_cpu(int num);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void arch_unregister_cpu(int);
+#endif
+
+#endif /* _ASM_I386_CPU_H_ */
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
new file mode 100644
index 0000000..e147cab
--- /dev/null
+++ b/include/asm-i386/cpufeature.h
@@ -0,0 +1,129 @@
+/*
+ * cpufeature.h
+ *
+ * Defines x86 CPU feature bits
+ */
+
+#ifndef __ASM_I386_CPUFEATURE_H
+#define __ASM_I386_CPUFEATURE_H
+
+#include <linux/bitops.h>
+
+#define NCAPINTS	7	/* N 32-bit words worth of info */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME		(0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE		(0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE 	(0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC		(0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE		(0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE		(0*32+ 7) /* Machine Check Architecture */
+#define X86_FEATURE_CX8		(0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC	(0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP		(0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR	(0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE		(0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA		(0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_PAT		(0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
+#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
+				          /* of FPU context), and CR4.OSFXSR available */
+#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP	(0*32+27) /* CPU self snoop */
+#define X86_FEATURE_HT		(0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC		(0*32+29) /* Automatic clock control */
+#define X86_FEATURE_IA64	(0*32+30) /* IA-64 processor */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP		(1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX		(1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY	(2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN	(2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI	(2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX	(3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR	(3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8		(3*32+ 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
+#define X86_FEATURE_P3		(3*32+ 6) /* P3 */
+#define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
+#define X86_FEATURE_MWAIT	(4*32+ 3) /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL	(4*32+ 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_EST		(4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2		(4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_CID		(4*32+10) /* Context ID */
+#define X86_FEATURE_CX16        (4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
+#define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT	(5*32+ 6) /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If set, HyperThreading is not valid */
+
+#define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
+#define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
+
+#define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
+#define cpu_has_vme		boot_cpu_has(X86_FEATURE_VME)
+#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
+#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
+#define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
+#define cpu_has_pae		boot_cpu_has(X86_FEATURE_PAE)
+#define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
+#define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
+#define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
+#define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
+#define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
+#define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
+#define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
+#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
+#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
+#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
+#define cpu_has_k6_mtrr		boot_cpu_has(X86_FEATURE_K6_MTRR)
+#define cpu_has_cyrix_arr	boot_cpu_has(X86_FEATURE_CYRIX_ARR)
+#define cpu_has_centaur_mcr	boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
+#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
+#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
+#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
+#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+
+#endif /* __ASM_I386_CPUFEATURE_H */
+
+/* 
+ * Local Variables:
+ * mode:c
+ * comment-column:42
+ * End:
+ */
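
Each X86_FEATURE_* value packs an array index and a bit number as word*32+bit,
which is why cpu_has() can hand it straight to test_bit(). A standalone sketch
of the encoding (hypothetical demo program, not kernel code):

#include <stdio.h>

#define X86_FEATURE_PSE (0*32+ 3)	/* same encoding as above */

int main(void)
{
	unsigned long x86_capability[7] = { 0 };

	/* set the bit the way the cpuid probe would */
	x86_capability[X86_FEATURE_PSE / 32] |= 1ul << (X86_FEATURE_PSE % 32);

	printf("PSE: word %d, bit %d, present=%d\n",
	       X86_FEATURE_PSE / 32, X86_FEATURE_PSE % 32,
	       !!(x86_capability[X86_FEATURE_PSE / 32] &
		  (1ul << (X86_FEATURE_PSE % 32))));
	return 0;
}
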
diff --git a/include/asm-i386/cputime.h b/include/asm-i386/cputime.h
new file mode 100644
index 0000000..398ed7c
--- /dev/null
+++ b/include/asm-i386/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __I386_CPUTIME_H
+#define __I386_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __I386_CPUTIME_H */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
new file mode 100644
index 0000000..d973289
--- /dev/null
+++ b/include/asm-i386/current.h
@@ -0,0 +1,15 @@
+#ifndef _I386_CURRENT_H
+#define _I386_CURRENT_H
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct * get_current(void)
+{
+	return current_thread_info()->task;
+}
+ 
+#define current get_current()
+
+#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-i386/debugreg.h b/include/asm-i386/debugreg.h
new file mode 100644
index 0000000..f0b2b06
--- /dev/null
+++ b/include/asm-i386/debugreg.h
@@ -0,0 +1,64 @@
+#ifndef _I386_DEBUGREG_H
+#define _I386_DEBUGREG_H
+
+
+/* Indicate the register numbers for a number of the specific
+   debug registers.  Registers 0-3 contain the addresses we wish to trap on */
+#define DR_FIRSTADDR 0        /* u_debugreg[DR_FIRSTADDR] */
+#define DR_LASTADDR 3         /* u_debugreg[DR_LASTADDR]  */
+
+#define DR_STATUS 6           /* u_debugreg[DR_STATUS]     */
+#define DR_CONTROL 7          /* u_debugreg[DR_CONTROL] */
+
+/* Define a few things for the status register.  We can use this to determine
+   which debugging register was responsible for the trap.  The other bits
+   are either reserved or not of interest to us. */
+
+#define DR_TRAP0	(0x1)		/* db0 */
+#define DR_TRAP1	(0x2)		/* db1 */
+#define DR_TRAP2	(0x4)		/* db2 */
+#define DR_TRAP3	(0x8)		/* db3 */
+
+#define DR_STEP		(0x4000)	/* single-step */
+#define DR_SWITCH	(0x8000)	/* task switch */
+
+/* Now define a bunch of things for manipulating the control register.
+   The top two bytes of the control register consist of 4 fields of 4
+   bits - each field corresponds to one of the four debug registers,
+   and indicates what types of access we trap on, and how large the data
+   field is that we are looking at */
+
+#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
+#define DR_CONTROL_SIZE 4   /* 4 control bits per register */
+
+#define DR_RW_EXECUTE (0x0)   /* Settings for the access types to trap on */
+#define DR_RW_WRITE (0x1)
+#define DR_RW_READ (0x3)
+
+#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
+#define DR_LEN_2 (0x4)
+#define DR_LEN_4 (0xC)
+
+/* The low byte of the control register determines which registers are
+   enabled.  There are 4 fields of two bits.  One bit is "local", meaning
+   that the processor will reset the bit after a task switch, and the other
+   is "global", meaning that we have to explicitly reset the bit.  With linux,
+   you can use either one, since we explicitly zero the register when we enter
+   kernel mode. */
+
+#define DR_LOCAL_ENABLE_SHIFT 0    /* Extra shift to the local enable bit */
+#define DR_GLOBAL_ENABLE_SHIFT 1   /* Extra shift to the global enable bit */
+#define DR_ENABLE_SIZE 2           /* 2 enable bits per register */
+
+#define DR_LOCAL_ENABLE_MASK (0x55)  /* Set  local bits for all 4 regs */
+#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
+
+/* The second byte of the control register has a few special things.
+   We can slow the instruction pipeline for instructions coming via the
+   gdt or the ldt if we want to.  I am not sure why this is an advantage */
+
+#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
+#define DR_LOCAL_SLOWDOWN (0x100)   /* Local slow the pipeline */
+#define DR_GLOBAL_SLOWDOWN (0x200)  /* Global slow the pipeline */
+
+#endif
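
Putting the masks together: arming a 4-byte write watchpoint in debug register 0
means setting its local-enable bit in the low byte and placing DR_RW_WRITE|DR_LEN_4
in its 4-bit field above DR_CONTROL_SHIFT. A standalone sketch of just the
arithmetic (constants mirror the ones above; demo only):

#include <stdio.h>

#define DR_CONTROL_SHIFT 16
#define DR_CONTROL_SIZE 4
#define DR_RW_WRITE (0x1)
#define DR_LEN_4 (0xC)
#define DR_LOCAL_ENABLE_SHIFT 0
#define DR_ENABLE_SIZE 2

int main(void)
{
	int reg = 0;			/* use DR0 */
	unsigned long dr7 = 0;

	dr7 |= 1ul << (DR_LOCAL_ENABLE_SHIFT + reg * DR_ENABLE_SIZE);
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
		<< (DR_CONTROL_SHIFT + reg * DR_CONTROL_SIZE);

	printf("dr7 = %#lx\n", dr7);	/* prints 0xd0001 */
	return 0;
}
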
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
new file mode 100644
index 0000000..456db85
--- /dev/null
+++ b/include/asm-i386/delay.h
@@ -0,0 +1,26 @@
+#ifndef _I386_DELAY_H
+#define _I386_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/i386/lib/delay.c
+ */
+ 
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
+	__udelay(n))
+	
+#define ndelay(n) (__builtin_constant_p(n) ? \
+	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+	__ndelay(n))
+
+#endif /* defined(_I386_DELAY_H) */
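
The __builtin_constant_p() trick turns an out-of-range constant delay into a
link error instead of a silent overflow. A sketch of the three cases, assuming
this header is included:

void delay_examples(unsigned long n)
{
	udelay(10);	/* constant and <= 20000: becomes __const_udelay(10 * 0x10c7) */
	udelay(n);	/* not a compile-time constant: run-time __udelay(n) call */
	/* udelay(30000) would fail at link time by referencing __bad_udelay() */
}
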
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
new file mode 100644
index 0000000..11e6781
--- /dev/null
+++ b/include/asm-i386/desc.h
@@ -0,0 +1,144 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+#include <asm/segment.h>
+
+#define CPU_16BIT_STACK_SIZE 1024
+
+#ifndef __ASSEMBLY__
+
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+
+#include <asm/mmu.h>
+
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+
+DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
+
+struct Xgt_desc_struct {
+	unsigned short size;
+	unsigned long address __attribute__((packed));
+	unsigned short pad;
+} __attribute__ ((packed));
+
+extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+
+#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+	"movw %%ax,2(%2)\n\t" \
+	"rorl $16,%%eax\n\t" \
+	"movb %%al,4(%2)\n\t" \
+	"movb %4,5(%2)\n\t" \
+	"movb $0,6(%2)\n\t" \
+	"movb %%ah,7(%2)\n\t" \
+	"rorl $16,%%eax" \
+	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+{
+	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+}
+
+#define LDT_entry_a(info) \
+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+	(((info)->base_addr & 0xff000000) | \
+	(((info)->base_addr & 0x00ff0000) >> 16) | \
+	((info)->limit & 0xf0000) | \
+	(((info)->read_exec_only ^ 1) << 9) | \
+	((info)->contents << 10) | \
+	(((info)->seg_not_present ^ 1) << 15) | \
+	((info)->seg_32bit << 22) | \
+	((info)->limit_in_pages << 23) | \
+	((info)->useable << 20) | \
+	0x7000)
+
+#define LDT_empty(info) (\
+	(info)->base_addr	== 0	&& \
+	(info)->limit		== 0	&& \
+	(info)->contents	== 0	&& \
+	(info)->read_exec_only	== 1	&& \
+	(info)->seg_32bit	== 0	&& \
+	(info)->limit_in_pages	== 0	&& \
+	(info)->seg_not_present	== 1	&& \
+	(info)->useable		== 0	)
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+static inline void clear_LDT(void)
+{
+	int cpu = get_cpu();
+
+	set_ldt_desc(cpu, &default_ldt[0], 5);
+	load_LDT_desc();
+	put_cpu();
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+	void *segments = pc->ldt;
+	int count = pc->size;
+
+	if (likely(!count)) {
+		segments = &default_ldt[0];
+		count = 5;
+	}
+		
+	set_ldt_desc(cpu, segments, count);
+	load_LDT_desc();
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+	int cpu = get_cpu();
+	load_LDT_nolock(pc, cpu);
+	put_cpu();
+}
+
+static inline unsigned long get_desc_base(unsigned long *desc)
+{
+	unsigned long base;
+	base = ((desc[0] >> 16)  & 0x0000ffff) |
+		((desc[1] << 16) & 0x00ff0000) |
+		(desc[1] & 0xff000000);
+	return base;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
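
get_desc_base() reassembles a base address that the descriptor format scatters
across both words. A standalone round-trip sketch (demo values, not kernel code):

#include <stdio.h>

static unsigned long get_desc_base(unsigned long *desc)
{
	return ((desc[0] >> 16) & 0x0000ffff) |
	       ((desc[1] << 16) & 0x00ff0000) |
	       (desc[1] & 0xff000000);
}

int main(void)
{
	unsigned long base = 0xc0101234ul;	/* arbitrary example base */
	unsigned long desc[2];

	desc[0] = (base & 0xffff) << 16;	/* base 15..0; limit bits omitted */
	desc[1] = ((base >> 16) & 0xff)		/* base 23..16 */
		| (base & 0xff000000);		/* base 31..24 */

	printf("decoded base = %#lx\n", get_desc_base(desc));	/* 0xc0101234 */
	return 0;
}
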
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h
new file mode 100644
index 0000000..28ed8b2
--- /dev/null
+++ b/include/asm-i386/div64.h
@@ -0,0 +1,48 @@
+#ifndef __I386_DIV64
+#define __I386_DIV64
+
+/*
+ * do_div() is NOT a C function. It wants to return
+ * two values (the quotient and the remainder), but
+ * since that doesn't work very well in C, what it
+ * does is:
+ *
+ * - modifies the 64-bit dividend _in_place_
+ * - returns the 32-bit remainder
+ *
+ * This ends up being the most efficient "calling
+ * convention" on x86.
+ */
+#define do_div(n,base) ({ \
+	unsigned long __upper, __low, __high, __mod, __base; \
+	__base = (base); \
+	asm("":"=a" (__low), "=d" (__high):"A" (n)); \
+	__upper = __high; \
+	if (__high) { \
+		__upper = __high % (__base); \
+		__high = __high / (__base); \
+	} \
+	asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
+	asm("":"=A" (n):"a" (__low),"d" (__high)); \
+	__mod; \
+})
+
+/*
+ * (long)X = ((long long)divs) / (long)div
+ * (long)rem = ((long long)divs) % (long)div
+ *
+ * Warning: this will raise a divide exception if X overflows.
+ */
+#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
+
+extern inline long
+div_ll_X_l_rem(long long divs, long div, long *rem)
+{
+	long dum2;
+
+	__asm__("divl %2"
+		: "=a" (dum2), "=d" (*rem)
+		: "rm" (div), "A" (divs));
+
+	return dum2;
+}
+#endif
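
Usage follows directly from the comment above: the dividend variable is both
input and output, and the macro's value is the remainder. A minimal sketch,
assuming this header is included:

static void do_div_example(void)
{
	unsigned long long ns = 1000000123ULL;
	unsigned long rem;

	rem = do_div(ns, 1000000);
	/* ns is now 1000 (the quotient), rem is 123 (the remainder) */
}
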
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
new file mode 100644
index 0000000..563964b
--- /dev/null
+++ b/include/asm-i386/dma-mapping.h
@@ -0,0 +1,177 @@
+#ifndef _ASM_I386_DMA_MAPPING_H
+#define _ASM_I386_DMA_MAPPING_H
+
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			   dma_addr_t *dma_handle, unsigned int __nocast flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+			 void *vaddr, dma_addr_t dma_handle);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	flush_write_buffers();
+	return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for (i = 0; i < nents; i++ ) {
+		BUG_ON(!sg[i].page);
+
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+
+	flush_write_buffers();
+	return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+        /*
+         * we fall back to GFP_DMA when the mask isn't all 1s,
+         * so we can't guarantee allocations that must be
+         * within a tighter range than GFP_DMA..
+         */
+        if(mask < 0x00ffffff)
+                return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+	/* no easy way to get cache size on all x86, so return the
+	 * maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d)	(1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
+#endif
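
Taken together, a driver maps a buffer, hands the returned bus address to the
hardware, and unmaps when the transfer completes. A sketch of the streaming
case using only the calls declared above (the device pointer and buffer are
assumed to come from the caller, and -EIO assumes <linux/errno.h>):

static int dma_tx_sketch(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... program the device with 'handle' and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
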
diff --git a/include/asm-i386/dma.h b/include/asm-i386/dma.h
new file mode 100644
index 0000000..f24b2bb
--- /dev/null
+++ b/include/asm-i386/dma.h
@@ -0,0 +1,298 @@
+/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>	/* And spinlocks */
+#include <asm/io.h>		/* need byte IO */
+#include <linux/delay.h>
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb	outb_p
+#else
+#define dma_outb	outb
+#endif
+
+#define dma_inb		inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ *  controller 1: channels 0-3, byte operations, ports 00-1F
+ *  controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ *  - ALL registers are 8 bits only, regardless of transfer size
+ *  - channel 4 is not used - cascades 1 into 2.
+ *  - channels 0-3 are byte - addresses/counts are for physical bytes
+ *  - channels 5-7 are word - addresses/counts are for physical words
+ *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ *  - transfer count loaded to registers is 1 less than actual count
+ *  - controller 2 offsets are all even (2x offsets for controller 1)
+ *  - page registers for 5-7 don't use data bit 0, represent 128K pages
+ *  - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.  
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ *  Address mapping for channels 0-3:
+ *
+ *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *    |  ...  |   |  ... |   |  ... |
+ *   P7  ...  P0  A7 ... A0  A7 ... A0   
+ * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
+ *
+ *  Address mapping for channels 5-7:
+ *
+ *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
+ *    |  ...  |   \   \   ... \  \  \  ... \  \
+ *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
+ *    |  ...  |     \   \   ... \  \  \  ... \
+ *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0   
+ * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
+ *
+ */
+
+#define MAX_DMA_CHANNELS	8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG		0x08	/* command register (w) */
+#define DMA1_STAT_REG		0x08	/* status register (r) */
+#define DMA1_REQ_REG            0x09    /* request register (w) */
+#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
+#define DMA1_MODE_REG		0x0B	/* mode register (w) */
+#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
+#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
+#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
+#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
+
+#define DMA2_CMD_REG		0xD0	/* command register (w) */
+#define DMA2_STAT_REG		0xD0	/* status register (r) */
+#define DMA2_REQ_REG            0xD2    /* request register (w) */
+#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
+#define DMA2_MODE_REG		0xD6	/* mode register (w) */
+#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
+#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
+#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
+#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
+
+#define DMA_ADDR_0              0x00    /* DMA address registers */
+#define DMA_ADDR_1              0x02
+#define DMA_ADDR_2              0x04
+#define DMA_ADDR_3              0x06
+#define DMA_ADDR_4              0xC0
+#define DMA_ADDR_5              0xC4
+#define DMA_ADDR_6              0xC8
+#define DMA_ADDR_7              0xCC
+
+#define DMA_CNT_0               0x01    /* DMA count registers */
+#define DMA_CNT_1               0x03
+#define DMA_CNT_2               0x05
+#define DMA_CNT_3               0x07
+#define DMA_CNT_4               0xC2
+#define DMA_CNT_5               0xC6
+#define DMA_CNT_6               0xCA
+#define DMA_CNT_7               0xCE
+
+#define DMA_PAGE_0              0x87    /* DMA page registers */
+#define DMA_PAGE_1              0x83
+#define DMA_PAGE_2              0x81
+#define DMA_PAGE_3              0x82
+#define DMA_PAGE_5              0x8B
+#define DMA_PAGE_6              0x89
+#define DMA_PAGE_7              0x8A
+
+#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT	0x10
+
+
+extern spinlock_t  dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dma_spin_lock, flags);
+	return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(dmanr,  DMA1_MASK_REG);
+	else
+		dma_outb(dmanr & 3,  DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(dmanr | 4,  DMA1_MASK_REG);
+	else
+		dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+	if (dmanr<=3)
+		dma_outb(0,  DMA1_CLEAR_FF_REG);
+	else
+		dma_outb(0,  DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+	if (dmanr<=3)
+		dma_outb(mode | dmanr,  DMA1_MODE_REG);
+	else
+		dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+	switch(dmanr) {
+		case 0:
+			dma_outb(pagenr, DMA_PAGE_0);
+			break;
+		case 1:
+			dma_outb(pagenr, DMA_PAGE_1);
+			break;
+		case 2:
+			dma_outb(pagenr, DMA_PAGE_2);
+			break;
+		case 3:
+			dma_outb(pagenr, DMA_PAGE_3);
+			break;
+		case 5:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+			break;
+		case 6:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+			break;
+		case 7:
+			dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+			break;
+	}
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	set_dma_page(dmanr, a>>16);
+	if (dmanr <= 3)  {
+	    dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+            dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+	}  else  {
+	    dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+	    dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+	}
+}
+
+
+/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+        count--;
+	if (dmanr <= 3)  {
+	    dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+	    dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+        } else {
+	    dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+	    dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+        }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+	/* using short to get 16-bit wrap around */
+	unsigned short count;
+
+	count = 1 + dma_inb(io_port);
+	count += dma_inb(io_port) << 8;
+	
+	return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr);	/* release it again */
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy 	(0)
+#endif
+
+#endif /* _ASM_DMA_H */
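
Putting the helpers together: a transfer is programmed with the channel masked
and the flip-flop cleared, all under the DMA lock as the comments above require.
A sketch for a read into a low-memory buffer on channel 2 (the channel is
assumed to have been reserved with request_dma() beforehand):

static void isa_dma_read_sketch(unsigned int phys_addr, unsigned int count)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(2);
	clear_dma_ff(2);
	set_dma_mode(2, DMA_MODE_READ);
	set_dma_addr(2, phys_addr);	/* physical address, below 16MB */
	set_dma_count(2, count);	/* bytes on channels 0-3 */
	enable_dma(2);

	release_dma_lock(flags);
}
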
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
new file mode 100644
index 0000000..5c285ae
--- /dev/null
+++ b/include/asm-i386/e820.h
@@ -0,0 +1,40 @@
+/*
+ * structures and definitions for the int 15, ax=e820 memory map
+ * scheme.
+ *
+ * In a nutshell, arch/i386/boot/setup.S populates a scratch table
+ * in the empty_zero_block that contains a list of usable address/size
+ * pairs.  In arch/i386/kernel/setup.c, this information is
+ * transferred into the e820map, and in arch/i386/mm/init.c, that
+ * new information is used to mark pages reserved or not.
+ *
+ */
+#ifndef __E820_HEADER
+#define __E820_HEADER
+
+#define E820MAP	0x2d0		/* our map */
+#define E820MAX	32		/* number of entries in E820MAP */
+#define E820NR	0x1e8		/* # entries in E820MAP */
+
+#define E820_RAM	1
+#define E820_RESERVED	2
+#define E820_ACPI	3 /* usable as RAM once ACPI tables have been read */
+#define E820_NVS	4
+
+#define HIGH_MEMORY	(1024*1024)
+
+#ifndef __ASSEMBLY__
+
+struct e820map {
+    int nr_map;
+    struct e820entry {
+	unsigned long long addr;	/* start of memory segment */
+	unsigned long long size;	/* size of memory segment */
+	unsigned long type;		/* type of memory segment */
+    } map[E820MAX];
+};
+
+extern struct e820map e820;
+#endif/*!__ASSEMBLY__*/
+
+#endif/*__E820_HEADER*/
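
For its consumers the map is just an array scan; setup.c walks it roughly like
this sketch (illustrative only), here summing the E820_RAM entries:

static unsigned long long e820_ram_sketch(void)
{
	unsigned long long ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RAM)
			ram += e820.map[i].size;
	return ram;
}
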
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
new file mode 100644
index 0000000..130bdc8
--- /dev/null
+++ b/include/asm-i386/elf.h
@@ -0,0 +1,193 @@
+#ifndef __ASMi386_ELF_H
+#define __ASMi386_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/processor.h>
+#include <asm/system.h>		/* for savesegment */
+
+#include <linux/utsname.h>
+
+#define R_386_NONE	0
+#define R_386_32	1
+#define R_386_PC32	2
+#define R_386_GOT32	3
+#define R_386_PLT32	4
+#define R_386_COPY	5
+#define R_386_GLOB_DAT	6
+#define R_386_JMP_SLOT	7
+#define R_386_RELATIVE	8
+#define R_386_GOTOFF	9
+#define R_386_GOTPC	10
+#define R_386_NUM	11
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_i387_struct elf_fpregset_t;
+typedef struct user_fxsr_struct elf_fpxregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_386
+
+/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
+   contains a pointer to a function which might be registered using `atexit'.
+   This provides a means for the dynamic linker to call DT_FINI functions for
+   shared libraries that have been loaded before the code runs.
+
+   A value of 0 tells us we have no such handler.
+
+   We might as well make sure everything else is cleared too (except for %esp),
+   just to make things more deterministic.
+ */
+#define ELF_PLAT_INIT(_r, load_addr)	do { \
+	_r->ebx = 0; _r->ecx = 0; _r->edx = 0; \
+	_r->esi = 0; _r->edi = 0; _r->ebp = 0; \
+	_r->eax = 0; \
+} while (0)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE	4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+   now struct user_regs_struct; they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs)		\
+	pr_reg[0] = regs->ebx;				\
+	pr_reg[1] = regs->ecx;				\
+	pr_reg[2] = regs->edx;				\
+	pr_reg[3] = regs->esi;				\
+	pr_reg[4] = regs->edi;				\
+	pr_reg[5] = regs->ebp;				\
+	pr_reg[6] = regs->eax;				\
+	pr_reg[7] = regs->xds;				\
+	pr_reg[8] = regs->xes;				\
+	savesegment(fs,pr_reg[9]);			\
+	savesegment(gs,pr_reg[10]);			\
+	pr_reg[11] = regs->orig_eax;			\
+	pr_reg[12] = regs->eip;				\
+	pr_reg[13] = regs->xcs;				\
+	pr_reg[14] = regs->eflags;			\
+	pr_reg[15] = regs->esp;				\
+	pr_reg[16] = regs->xss;
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports.  This could be done in user space,
+   but it's not easy, and we've already done it here.  */
+
+#define ELF_HWCAP	(boot_cpu_data.x86_capability[0])
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM  (system_utsname.machine)
+
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them, start the x86-specific ones at 32.
+ */
+#define AT_SYSINFO		32
+#define AT_SYSINFO_EHDR		33
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
+
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack)	(executable_stack != EXSTACK_DISABLE_X)
+
+extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
+
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
+
+#define VSYSCALL_BASE	(__fix_to_virt(FIX_VSYSCALL))
+#define VSYSCALL_EHDR	((const struct elfhdr *) VSYSCALL_BASE)
+#define VSYSCALL_ENTRY	((unsigned long) &__kernel_vsyscall)
+extern void __kernel_vsyscall;
+
+#define ARCH_DLINFO						\
+do {								\
+		NEW_AUX_ENT(AT_SYSINFO,	VSYSCALL_ENTRY);	\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE);	\
+} while (0)
+
+/*
+ * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the vsyscall DSO contents.  Dumping its
+ * contents makes post-mortem fully interpretable later without matching up
+ * the same kernel and hardware config to see what PC values meant.
+ * Dumping its extra ELF program headers includes all the other information
+ * a debugger needs to easily find how the vsyscall DSO was being used.
+ */
+#define ELF_CORE_EXTRA_PHDRS		(VSYSCALL_EHDR->e_phnum)
+#define ELF_CORE_WRITE_EXTRA_PHDRS					      \
+do {									      \
+	const struct elf_phdr *const vsyscall_phdrs =			      \
+		(const struct elf_phdr *) (VSYSCALL_BASE		      \
+					   + VSYSCALL_EHDR->e_phoff);	      \
+	int i;								      \
+	Elf32_Off ofs = 0;						      \
+	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+		struct elf_phdr phdr = vsyscall_phdrs[i];		      \
+		if (phdr.p_type == PT_LOAD) {				      \
+			BUG_ON(ofs != 0);				      \
+			ofs = phdr.p_offset = offset;			      \
+			phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);	      \
+			phdr.p_filesz = phdr.p_memsz;			      \
+			offset += phdr.p_filesz;			      \
+		}							      \
+		else							      \
+			phdr.p_offset += ofs;				      \
+		phdr.p_paddr = 0; /* match other core phdrs */		      \
+		DUMP_WRITE(&phdr, sizeof(phdr));			      \
+	}								      \
+} while (0)
+#define ELF_CORE_WRITE_EXTRA_DATA					      \
+do {									      \
+	const struct elf_phdr *const vsyscall_phdrs =			      \
+		(const struct elf_phdr *) (VSYSCALL_BASE		      \
+					   + VSYSCALL_EHDR->e_phoff);	      \
+	int i;								      \
+	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+		if (vsyscall_phdrs[i].p_type == PT_LOAD)		      \
+			DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr,	      \
+				   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));    \
+	}								      \
+} while (0)
+
+#endif
+
+#endif
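
The AT_SYSINFO entry that ARCH_DLINFO pushes is visible to ordinary programs
through the auxiliary vector. A userspace sketch (getauxval() is a later glibc
convenience, used here only for brevity; the value 32 matches AT_SYSINFO above):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long entry = getauxval(32);	/* AT_SYSINFO */

	if (entry)
		printf("__kernel_vsyscall entry at %#lx\n", entry);
	return 0;
}
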
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h
new file mode 100644
index 0000000..969b343
--- /dev/null
+++ b/include/asm-i386/errno.h
@@ -0,0 +1,6 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif
diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h
new file mode 100644
index 0000000..511cde9
--- /dev/null
+++ b/include/asm-i386/fcntl.h
@@ -0,0 +1,88 @@
+#ifndef _I386_FCNTL_H
+#define _I386_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+   located on an ext2 file system */
+#define O_ACCMODE	   0003
+#define O_RDONLY	     00
+#define O_WRONLY	     01
+#define O_RDWR		     02
+#define O_CREAT		   0100	/* not fcntl */
+#define O_EXCL		   0200	/* not fcntl */
+#define O_NOCTTY	   0400	/* not fcntl */
+#define O_TRUNC		  01000	/* not fcntl */
+#define O_APPEND	  02000
+#define O_NONBLOCK	  04000
+#define O_NDELAY	O_NONBLOCK
+#define O_SYNC		 010000
+#define FASYNC		 020000	/* fcntl, for BSD compatibility */
+#define O_DIRECT	 040000	/* direct disk access hint */
+#define O_LARGEFILE	0100000
+#define O_DIRECTORY	0200000	/* must be a directory */
+#define O_NOFOLLOW	0400000 /* don't follow links */
+#define O_NOATIME	01000000
+
+#define F_DUPFD		0	/* dup */
+#define F_GETFD		1	/* get close_on_exec */
+#define F_SETFD		2	/* set/clear close_on_exec */
+#define F_GETFL		3	/* get file->f_flags */
+#define F_SETFL		4	/* set file->f_flags */
+#define F_GETLK		5
+#define F_SETLK		6
+#define F_SETLKW	7
+
+#define F_SETOWN	8	/*  for sockets. */
+#define F_GETOWN	9	/*  for sockets. */
+#define F_SETSIG	10	/*  for sockets. */
+#define F_GETSIG	11	/*  for sockets. */
+
+#define F_GETLK64	12	/*  using 'struct flock64' */
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC	1	/* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK		0
+#define F_WRLCK		1
+#define F_UNLCK		2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK		4	/* or 3 */
+#define F_SHLCK		8	/* or 4 */
+
+/* for leases */
+#define F_INPROGRESS	16
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH		1	/* shared lock */
+#define LOCK_EX		2	/* exclusive lock */
+#define LOCK_NB		4	/* or'd with one of the above to prevent
+				   blocking */
+#define LOCK_UN		8	/* remove lock */
+
+#define LOCK_MAND	32	/* This is a mandatory flock */
+#define LOCK_READ	64	/* ... Which allows concurrent read operations */
+#define LOCK_WRITE	128	/* ... Which allows concurrent write operations */
+#define LOCK_RW		192	/* ... Which allows concurrent read & write ops */
+
+struct flock {
+	short l_type;
+	short l_whence;
+	off_t l_start;
+	off_t l_len;
+	pid_t l_pid;
+};
+
+struct flock64 {
+	short  l_type;
+	short  l_whence;
+	loff_t l_start;
+	loff_t l_len;
+	pid_t  l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE	1024
+
+#endif
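
The lock commands take a struct flock describing the byte range. A small
userspace sketch taking a write lock on the first 100 bytes of a scratch file
(the file name is an arbitrary example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,
	};
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_SETLKW, &fl) < 0) {
		perror("lock");
		return 1;
	}
	printf("locked bytes 0-99\n");
	close(fd);
	return 0;
}
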
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
new file mode 100644
index 0000000..c94cac9
--- /dev/null
+++ b/include/asm-i386/fixmap.h
@@ -0,0 +1,158 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+
+/* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+#define __FIXADDR_TOP	0xfffff000
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1).  Use set_fixmap(idx, phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+	FIX_HOLE,
+	FIX_VSYSCALL,
+#ifdef CONFIG_X86_LOCAL_APIC
+	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+	FIX_IO_APIC_BASE_0,
+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+	FIX_CO_CPU,	/* Cobalt timer */
+	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
+	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
+	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+	FIX_F00F_IDT,	/* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+	FIX_CYCLONE_TIMER, /*cyclone timer register*/
+#endif 
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_ACPI_BOOT
+	FIX_ACPI_BEGIN,
+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
+#ifdef CONFIG_PCI_MMCONFIG
+	FIX_PCIE_MCFG,
+#endif
+	__end_of_permanent_fixed_addresses,
+	/* temporary boot-time mappings, used before ioremap() is functional */
+#define NR_FIX_BTMAPS	16
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+	FIX_WP_TEST,
+	__end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+					unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+		__set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
+
+#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
+#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
+
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * this branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message..
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		__this_fixmap_does_not_exist();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
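
The index-to-address arithmetic is just page-sized steps down from
__FIXADDR_TOP. A standalone sketch of the same computation (demo only):

#include <stdio.h>

#define FIXADDR_TOP	0xfffff000ul	/* mirrors __FIXADDR_TOP above */
#define PAGE_SHIFT	12

int main(void)
{
	unsigned int idx = 2;	/* e.g. the third fixed address */

	printf("fix_to_virt(%u) = %#lx\n", idx,
	       FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT));
	/* prints fix_to_virt(2) = 0xffffd000 */
	return 0;
}
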
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
new file mode 100644
index 0000000..f478228
--- /dev/null
+++ b/include/asm-i386/floppy.h
@@ -0,0 +1,319 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_I386_FLOPPY_H
+#define __ASM_I386_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+
+
+#define SW fd_routine[use_virtual_dma&1]
+#define CSW fd_routine[can_use_virtual_dma & 1]
+
+
+#define fd_inb(port)			inb_p(port)
+#define fd_outb(value,port)		outb_p(value,port)
+
+#define fd_request_dma()        CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma()           CSW._free_dma(FLOPPY_DMA)
+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue()    SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size)	SW._dma_mem_alloc(size)
+#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
+
+#define FLOPPY_CAN_FALLBACK_ON_NODMA
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_pdma;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+	register unsigned char st;
+
+#undef TRACE_FLPY_INT
+#define NO_FLOPPY_ASSEMBLER
+
+#ifdef TRACE_FLPY_INT
+	static int calls=0;
+	static int bytes=0;
+	static int dma_wait=0;
+#endif
+	if (!doing_pdma)
+		return floppy_interrupt(irq, dev_id, regs);
+
+#ifdef TRACE_FLPY_INT
+	if(!calls)
+		bytes = virtual_dma_count;
+#endif
+
+#ifndef NO_FLOPPY_ASSEMBLER
+	__asm__ (
+       "testl %1,%1"
+	"je 3f"
+"1:	inb %w4,%b0"
+	"andb $160,%b0"
+	"cmpb $160,%b0"
+	"jne 2f"
+	"incw %w4"
+	"testl %3,%3"
+	"jne 4f"
+	"inb %w4,%b0"
+	"movb %0,(%2)"
+	"jmp 5f"
+"4:    	movb (%2),%0"
+	"outb %b0,%w4"
+"5:	decw %w4"
+	"outb %0,$0x80"
+	"decl %1"
+	"incl %2"
+	"testl %1,%1"
+	"jne 1b"
+"3:	inb %w4,%b0"
+"2:	"
+       : "=a" ((char) st), 
+       "=c" ((long) virtual_dma_count), 
+       "=S" ((long) virtual_dma_addr)
+       : "b" ((long) virtual_dma_mode),
+       "d" ((short) virtual_dma_port+4), 
+       "1" ((long) virtual_dma_count),
+       "2" ((long) virtual_dma_addr));
+#else	
+	{
+		register int lcount;
+		register char *lptr;
+
+		st = 1;
+		for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
+		    lcount; lcount--, lptr++) {
+			st=inb(virtual_dma_port+4) & 0xa0 ;
+			if(st != 0xa0) 
+				break;
+			if(virtual_dma_mode)
+				outb_p(*lptr, virtual_dma_port+5);
+			else
+				*lptr = inb_p(virtual_dma_port+5);
+		}
+		virtual_dma_count = lcount;
+		virtual_dma_addr = lptr;
+		st = inb(virtual_dma_port+4);
+	}
+#endif
+
+#ifdef TRACE_FLPY_INT
+	calls++;
+#endif
+	if(st == 0x20)
+		return IRQ_HANDLED;
+	if(!(st & 0x20)) {
+		virtual_dma_residue += virtual_dma_count;
+		virtual_dma_count=0;
+#ifdef TRACE_FLPY_INT
+		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 
+		       virtual_dma_count, virtual_dma_residue, calls, bytes,
+		       dma_wait);
+		calls = 0;
+		dma_wait=0;
+#endif
+		doing_pdma = 0;
+		floppy_interrupt(irq, dev_id, regs);
+		return IRQ_HANDLED;
+	}
+#ifdef TRACE_FLPY_INT
+	if(!virtual_dma_count)
+		dma_wait++;
+#endif
+	return IRQ_HANDLED;
+}
+
+static void fd_disable_dma(void)
+{
+	if(! (can_use_virtual_dma & 1))
+		disable_dma(FLOPPY_DMA);
+	doing_pdma = 0;
+	virtual_dma_residue += virtual_dma_count;
+	virtual_dma_count=0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+{
+	return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+	return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+	if(can_use_virtual_dma)
+		return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
+						   "floppy", NULL);
+	else
+		return request_irq(FLOPPY_IRQ, floppy_interrupt,
+						   SA_INTERRUPT|SA_SAMPLE_RANDOM,
+						   "floppy", NULL);	
+
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{
+	return __get_dma_pages(GFP_KERNEL,get_order(size));
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+	return (unsigned long) vmalloc(size);
+
+}
+
+#define nodma_mem_alloc(size) vdma_mem_alloc(size)
+
+static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+	if((unsigned int) addr >= (unsigned int) high_memory)
+		vfree((void *)addr);
+	else
+		free_pages(addr, get_order(size));		
+}
+
+#define fd_dma_mem_free(addr, size)  _fd_dma_mem_free(addr, size) 
+
+static void _fd_chose_dma_mode(char *addr, unsigned long size)
+{
+	if(can_use_virtual_dma == 2) {
+		if((unsigned int) addr >= (unsigned int) high_memory ||
+		   isa_virt_to_bus(addr) >= 0x1000000 ||
+		   _CROSS_64KB(addr, size, 0))
+			use_virtual_dma = 1;
+		else
+			use_virtual_dma = 0;
+	} else {
+		use_virtual_dma = can_use_virtual_dma & 1;
+	}
+}
+
+#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
+
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+	doing_pdma = 1;
+	virtual_dma_port = io;
+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
+	virtual_dma_addr = addr;
+	virtual_dma_count = size;
+	virtual_dma_residue = 0;
+	return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+#ifdef FLOPPY_SANITY_CHECK
+	if (CROSS_64KB(addr, size)) {
+		printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
+		return -1;
+	}
+#endif
+	/* actual, physical DMA */
+	doing_pdma = 0;
+	clear_dma_ff(FLOPPY_DMA);
+	set_dma_mode(FLOPPY_DMA,mode);
+	set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr));
+	set_dma_count(FLOPPY_DMA,size);
+	enable_dma(FLOPPY_DMA);
+	return 0;
+}
+
+struct fd_routine_l {
+	int (*_request_dma)(unsigned int dmanr, const char * device_id);
+	void (*_free_dma)(unsigned int dmanr);
+	int (*_get_dma_residue)(unsigned int dummy);
+	unsigned long (*_dma_mem_alloc) (unsigned long size);
+	int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+} fd_routine[] = {
+	{
+		request_dma,
+		free_dma,
+		get_dma_residue,
+		dma_mem_alloc,
+		hard_dma_setup
+	},
+	{
+		vdma_request_dma,
+		vdma_nop,
+		vdma_get_dma_residue,
+		vdma_mem_alloc,
+		vdma_dma_setup
+	}
+};
+
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user.		Paul G.
+ */
+#define FLOPPY0_TYPE	({				\
+	unsigned long flags;				\
+	unsigned char val;				\
+	spin_lock_irqsave(&rtc_lock, flags);		\
+	val = (CMOS_READ(0x10) >> 4) & 15;		\
+	spin_unlock_irqrestore(&rtc_lock, flags);	\
+	val;						\
+})
+
+#define FLOPPY1_TYPE	({				\
+	unsigned long flags;				\
+	unsigned char val;				\
+	spin_lock_irqsave(&rtc_lock, flags);		\
+	val = CMOS_READ(0x10) & 15;			\
+	spin_unlock_irqrestore(&rtc_lock, flags);	\
+	val;						\
+})
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define FLOPPY_MOTOR_MASK 0xf0
+
+#define AUTO_DMA
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_I386_FLOPPY_H */
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
new file mode 100644
index 0000000..fc813b2
--- /dev/null
+++ b/include/asm-i386/genapic.h
@@ -0,0 +1,115 @@
+#ifndef _ASM_GENAPIC_H
+#define _ASM_GENAPIC_H 1
+
+/*
+ * Generic APIC driver interface.
+ *
+ * A straightforward mapping of the APIC-related parts of the
+ * x86 subarchitecture interface to a dynamic object.
+ *
+ * This is used by the "generic" x86 subarchitecture.
+ *
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ */
+
+struct mpc_config_translation;
+struct mpc_config_bus;
+struct mp_config_table;
+struct mpc_config_processor;
+
+struct genapic { 
+	char *name; 
+	int (*probe)(void); 
+
+	int (*apic_id_registered)(void);
+	cpumask_t (*target_cpus)(void);
+	int int_delivery_mode;
+	int int_dest_mode; 
+	int ESR_DISABLE;
+	int apic_destination_logical;
+	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+	unsigned long (*check_apicid_present)(int apicid); 
+	int no_balance_irq;
+	int no_ioapic_check;
+	void (*init_apic_ldr)(void);
+	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+	void (*clustered_apic_check)(void);
+	int (*multi_timer_check)(int apic, int irq);
+	int (*apicid_to_node)(int logical_apicid); 
+	int (*cpu_to_logical_apicid)(int cpu);
+	int (*cpu_present_to_apicid)(int mps_cpu);
+	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+	int (*mpc_apic_id)(struct mpc_config_processor *m, 
+			   struct mpc_config_translation *t); 
+	void (*setup_portio_remap)(void); 
+	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+	void (*enable_apic_mode)(void);
+	u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
+
+	/* mpparse */
+	void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, 
+				 struct mpc_config_translation *);
+	void (*mpc_oem_pci_bus)(struct mpc_config_bus *, 
+				struct mpc_config_translation *); 
+
+	/* When one of the next two hooks returns 1 the genapic
+	   is switched to this. Essentially they are additional probe 
+	   functions. */
+	int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, 
+			      char *productid);
+	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+
+	unsigned (*get_apic_id)(unsigned long x);
+	unsigned long apic_id_mask;
+	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	
+	/* ipi */
+	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_allbutself)(int vector);
+	void (*send_IPI_all)(int vector);
+}; 
+
+#define APICFUNC(x) .x = x
+
+#define APIC_INIT(aname, aprobe) { \
+	.name = aname, \
+	.probe = aprobe, \
+	.int_delivery_mode = INT_DELIVERY_MODE, \
+	.int_dest_mode = INT_DEST_MODE, \
+	.no_balance_irq = NO_BALANCE_IRQ, \
+	.no_ioapic_check = NO_IOAPIC_CHECK, \
+	.ESR_DISABLE = esr_disable, \
+	.apic_destination_logical = APIC_DEST_LOGICAL, \
+	APICFUNC(apic_id_registered), \
+	APICFUNC(target_cpus), \
+	APICFUNC(check_apicid_used), \
+	APICFUNC(check_apicid_present), \
+	APICFUNC(init_apic_ldr), \
+	APICFUNC(ioapic_phys_id_map), \
+	APICFUNC(clustered_apic_check), \
+	APICFUNC(multi_timer_check), \
+	APICFUNC(apicid_to_node), \
+	APICFUNC(cpu_to_logical_apicid), \
+	APICFUNC(cpu_present_to_apicid), \
+	APICFUNC(apicid_to_cpu_present), \
+	APICFUNC(mpc_apic_id), \
+	APICFUNC(setup_portio_remap), \
+	APICFUNC(check_phys_apicid_present), \
+	APICFUNC(mpc_oem_bus_info), \
+	APICFUNC(mpc_oem_pci_bus), \
+	APICFUNC(mps_oem_check), \
+	APICFUNC(get_apic_id), \
+	.apic_id_mask = APIC_ID_MASK, \
+	APICFUNC(cpu_mask_to_apicid), \
+	APICFUNC(acpi_madt_oem_check), \
+	APICFUNC(send_IPI_mask), \
+	APICFUNC(send_IPI_allbutself), \
+	APICFUNC(send_IPI_all), \
+	APICFUNC(enable_apic_mode), \
+	APICFUNC(phys_pkg_id), \
+	}
+
+extern struct genapic *genapic;
+
+#endif
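A sketch of how a subarchitecture instantiates this interface, assuming its mach_apic.h definitions (INT_DELIVERY_MODE, target_cpus() and friends) are in scope; the probe function and instance name here are illustrative:

static int probe_default(void)
{
	return 1;	/* fallback driver: always matches */
}

struct genapic apic_default = APIC_INIT("default", probe_default);

/*
 * Generic code then calls through whichever driver probed successfully,
 * e.g. genapic->send_IPI_all(vector).
 */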
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
new file mode 100644
index 0000000..ee754d3
--- /dev/null
+++ b/include/asm-i386/hardirq.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned long idle_timestamp;
+	unsigned int __nmi_count;	/* arch dependent */
+	unsigned int apic_timer_irqs;	/* arch dependent */
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+extern irq_cpustat_t irq_stat[];
+
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
+
+void ack_bad_irq(unsigned int irq);
+#include <linux/irq_cpustat.h>
+
+#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-i386/hdreg.h b/include/asm-i386/hdreg.h
new file mode 100644
index 0000000..5989bbc
--- /dev/null
+++ b/include/asm-i386/hdreg.h
@@ -0,0 +1 @@
+#warning this file is obsolete, please do not use it
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
new file mode 100644
index 0000000..1df42bf
--- /dev/null
+++ b/include/asm-i386/highmem.h
@@ -0,0 +1,79 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with 
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#include <asm/tlbflush.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+/*
+ * Ordering is:
+ *
+ * FIXADDR_TOP
+ * 			fixed_addresses
+ * FIXADDR_START
+ * 			temp fixed addresses
+ * FIXADDR_BOOT_START
+ * 			Persistent kmap area
+ * PKMAP_BASE
+ * VMALLOC_END
+ * 			Vmalloc area
+ * VMALLOC_START
+ * high_memory
+ */
+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void * FASTCALL(kmap_high(struct page *page));
+extern void FASTCALL(kunmap_high(struct page *page));
+
+void *kmap(struct page *page);
+void kunmap(struct page *page);
+void *kmap_atomic(struct page *page, enum km_type type);
+void kunmap_atomic(void *kvaddr, enum km_type type);
+struct page *kmap_atomic_to_page(void *ptr);
+
+#define flush_cache_kmaps()	do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
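For illustration, the two usual ways a driver touches a highmem page through this interface: kmap() takes a persistent slot and may sleep, while kmap_atomic() uses a fixed per-type slot and must not sleep (helper names are hypothetical):

#include <linux/string.h>
#include <linux/highmem.h>

static void zero_page_slow(struct page *page)
{
	void *vaddr = kmap(page);		/* may sleep */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}

static void zero_page_fast(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* never sleeps */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}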
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
new file mode 100644
index 0000000..6e20b07
--- /dev/null
+++ b/include/asm-i386/hpet.h
@@ -0,0 +1,113 @@
+
+#ifndef _I386_HPET_H
+#define _I386_HPET_H
+
+#ifdef CONFIG_HPET_TIMER
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+#include <linux/timex.h>
+#include <linux/config.h>
+
+#include <asm/fixmap.h>
+
+/*
+ * Documentation on HPET can be found at:
+ *      http://www.intel.com/ial/home/sp/pcmmspec.htm
+ *      ftp://download.intel.com/ial/home/sp/mmts098.pdf
+ */
+
+#define HPET_MMAP_SIZE	1024
+
+#define HPET_ID		0x000
+#define HPET_PERIOD	0x004
+#define HPET_CFG	0x010
+#define HPET_STATUS	0x020
+#define HPET_COUNTER	0x0f0
+#define HPET_T0_CFG	0x100
+#define HPET_T0_CMP	0x108
+#define HPET_T0_ROUTE	0x110
+#define HPET_T1_CFG	0x120
+#define HPET_T1_CMP	0x128
+#define HPET_T1_ROUTE	0x130
+#define HPET_T2_CFG	0x140
+#define HPET_T2_CMP	0x148
+#define HPET_T2_ROUTE	0x150
+
+#define HPET_ID_LEGSUP	0x00008000
+#define HPET_ID_NUMBER	0x00001f00
+#define HPET_ID_REV	0x000000ff
+#define	HPET_ID_NUMBER_SHIFT	8
+
+#define HPET_CFG_ENABLE	0x001
+#define HPET_CFG_LEGACY	0x002
+#define	HPET_LEGACY_8254	2
+#define	HPET_LEGACY_RTC		8
+
+#define HPET_TN_ENABLE		0x004
+#define HPET_TN_PERIODIC	0x008
+#define HPET_TN_PERIODIC_CAP	0x010
+#define HPET_TN_SETVAL		0x040
+#define HPET_TN_32BIT		0x100
+
+/* Use our own asm for 64 bit multiply/divide */
+#define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) 			\
+		__asm__ __volatile__("mull %2" 				\
+				:"=a" (eax_out), "=d" (edx_out) 	\
+				:"r" (reg_in), "0" (eax_in))
+
+#define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) 		\
+		__asm__ __volatile__("divl %2" 				\
+				:"=a" (eax_out), "=d" (edx_out) 	\
+				:"r" (reg_in), "0" (eax_in), "1" (edx_in))
+
+#define KERNEL_TICK_USEC 	(1000000UL/HZ)	/* tick value in microsec */
+/* Max HPET Period is 10^8 femto sec as in HPET spec */
+#define HPET_MAX_PERIOD (100000000UL)
+/*
+ * Min HPET period is 10^5 femto sec, just for safety. If it is less than this,
+ * then the 32-bit HPET counter wraps around in less than 0.5 sec.
+ */
+#define HPET_MIN_PERIOD (100000UL)
+
+extern unsigned long hpet_tick;  	/* hpet clks count per tick */
+extern unsigned long hpet_address;	/* hpet memory map physical address */
+
+extern int hpet_rtc_timer_init(void);
+extern int hpet_enable(void);
+extern int hpet_reenable(void);
+extern int is_hpet_enabled(void);
+extern int is_hpet_capable(void);
+extern int hpet_readl(unsigned long a);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
+extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
+extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec);
+extern int hpet_set_periodic_freq(unsigned long freq);
+extern int hpet_rtc_dropped_irq(void);
+extern int hpet_rtc_timer_init(void);
+extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+#endif /* CONFIG_HPET_EMULATE_RTC */
+#endif /* CONFIG_HPET_TIMER */
+#endif /* _I386_HPET_H */
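To make the units concrete: HPET_PERIOD holds femtoseconds per counter tick, so the cycles-per-kernel-tick value lands in a 64-bit multiply/divide. The timer code uses the ASM_MUL64_REG/ASM_DIV64_REG helpers above for this; a plain-C sketch of the same arithmetic (function name hypothetical):

static unsigned long hpet_cycles_per_tick(unsigned long hpet_period_fs)
{
	/* one kernel tick in femtoseconds: usec * 10^9 fs per usec */
	unsigned long long tick_fs =
		(unsigned long long)KERNEL_TICK_USEC * 1000000000ULL;

	return (unsigned long)(tick_fs / hpet_period_fs);
}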
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
new file mode 100644
index 0000000..4ac84cc
--- /dev/null
+++ b/include/asm-i386/hw_irq.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ *	linux/include/asm/hw_irq.h
+ *
+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
+ *
+ *	IRQ/IPI changes taken from work by Thomas Radke
+ *	<tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/config.h>
+#include <linux/profile.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+
+/*
+ * Various low-level irq details needed by irq.c, process.c,
+ * time.c, io_apic.c and smp.c
+ *
+ * Interrupt entry/exit code at both C and assembly level
+ */
+
+extern u8 irq_vector[NR_IRQ_VECTORS];
+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
+#define AUTO_ASSIGN		-1
+
+extern void (*interrupt[NR_IRQS])(void);
+
+#ifdef CONFIG_SMP
+fastcall void reschedule_interrupt(void);
+fastcall void invalidate_interrupt(void);
+fastcall void call_function_interrupt(void);
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+fastcall void apic_timer_interrupt(void);
+fastcall void error_interrupt(void);
+fastcall void spurious_interrupt(void);
+fastcall void thermal_interrupt(struct pt_regs *);
+#define platform_legacy_irq(irq)	((irq) < 16)
+#endif
+
+void disable_8259A_irq(unsigned int irq);
+void enable_8259A_irq(unsigned int irq);
+int i8259A_irq_pending(unsigned int irq);
+void make_8259A_irq(unsigned int irq);
+void init_8259A(int aeoi);
+void FASTCALL(send_IPI_self(int vector));
+void init_VISWS_APIC_irqs(void);
+void setup_IO_APIC(void);
+void disable_IO_APIC(void);
+void print_IO_APIC(void);
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
+void send_IPI(int dest, int vector);
+void setup_ioapic_dest(void);
+
+extern unsigned long io_apic_irqs;
+
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
+
+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+
+#if defined(CONFIG_X86_IO_APIC)
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
+{
+	if (IO_APIC_IRQ(i))
+		send_IPI_self(IO_APIC_VECTOR(i));
+}
+#else
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+#endif
+
+#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
new file mode 100644
index 0000000..f6feb98
--- /dev/null
+++ b/include/asm-i386/i387.h
@@ -0,0 +1,114 @@
+/*
+ * include/asm-i386/i387.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ *	Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+#ifndef __ASM_I386_I387_H
+#define __ASM_I386_I387_H
+
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/user.h>
+
+extern void mxcsr_feature_mask_init(void);
+extern void init_fpu(struct task_struct *);
+/*
+ * FPU lazy state save handling...
+ */
+extern void restore_fpu( struct task_struct *tsk );
+
+extern void kernel_fpu_begin(void);
+#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
+
+/*
+ * These must be called with preempt disabled
+ */
+static inline void __save_init_fpu( struct task_struct *tsk )
+{
+	if ( cpu_has_fxsr ) {
+		asm volatile( "fxsave %0 ; fnclex"
+			      : "=m" (tsk->thread.i387.fxsave) );
+	} else {
+		asm volatile( "fnsave %0 ; fwait"
+			      : "=m" (tsk->thread.i387.fsave) );
+	}
+	tsk->thread_info->status &= ~TS_USEDFPU;
+}
+
+#define __unlazy_fpu( tsk ) do { \
+	if ((tsk)->thread_info->status & TS_USEDFPU) \
+		save_init_fpu( tsk ); \
+} while (0)
+
+#define __clear_fpu( tsk )					\
+do {								\
+	if ((tsk)->thread_info->status & TS_USEDFPU) {		\
+		asm volatile("fnclex ; fwait");				\
+		(tsk)->thread_info->status &= ~TS_USEDFPU;	\
+		stts();						\
+	}							\
+} while (0)
+
+
+/*
+ * These disable preemption on their own and are safe
+ */
+static inline void save_init_fpu( struct task_struct *tsk )
+{
+	preempt_disable();
+	__save_init_fpu(tsk);
+	stts();
+	preempt_enable();
+}
+
+#define unlazy_fpu( tsk ) do {	\
+	preempt_disable();	\
+	__unlazy_fpu(tsk);	\
+	preempt_enable();	\
+} while (0)
+
+#define clear_fpu( tsk ) do {	\
+	preempt_disable();	\
+	__clear_fpu( tsk );	\
+	preempt_enable();	\
+} while (0)
+/*
+ * FPU state interaction...
+ */
+extern unsigned short get_fpu_cwd( struct task_struct *tsk );
+extern unsigned short get_fpu_swd( struct task_struct *tsk );
+extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387( struct _fpstate __user *buf );
+extern int restore_i387( struct _fpstate __user *buf );
+
+/*
+ * ptrace request handers...
+ */
+extern int get_fpregs( struct user_i387_struct __user *buf,
+		       struct task_struct *tsk );
+extern int set_fpregs( struct task_struct *tsk,
+		       struct user_i387_struct __user *buf );
+
+extern int get_fpxregs( struct user_fxsr_struct __user *buf,
+			struct task_struct *tsk );
+extern int set_fpxregs( struct task_struct *tsk,
+			struct user_fxsr_struct __user *buf );
+
+/*
+ * FPU state for core dumps...
+ */
+extern int dump_fpu( struct pt_regs *regs,
+		     struct user_i387_struct *fpu );
+
+#endif /* __ASM_I386_I387_H */
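Any kernel-side FPU/SSE use must be bracketed by kernel_fpu_begin()/kernel_fpu_end(), so the current task's lazy state is saved first and TS is set again afterwards. A sketch (the XMM store is illustrative and assumes a 16-byte-aligned destination):

static void clear16(void *dst)
{
	kernel_fpu_begin();	/* disables preemption, saves user FPU state */
	__asm__ __volatile__("pxor %%xmm0, %%xmm0\n\t"
			     "movaps %%xmm0, (%0)"
			     : : "r" (dst) : "memory");
	kernel_fpu_end();	/* sets TS again, reenables preemption */
}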
diff --git a/include/asm-i386/i8259.h b/include/asm-i386/i8259.h
new file mode 100644
index 0000000..29d8f9a
--- /dev/null
+++ b/include/asm-i386/i8259.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_I8259_H__
+#define __ASM_I8259_H__
+
+extern unsigned int cached_irq_mask;
+
+#define __byte(x,y) 		(((unsigned char *) &(y))[x])
+#define cached_master_mask	(__byte(0, cached_irq_mask))
+#define cached_slave_mask	(__byte(1, cached_irq_mask))
+
+extern spinlock_t i8259A_lock;
+
+extern void init_8259A(int auto_eoi);
+extern void enable_8259A_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+extern unsigned int startup_8259A_irq(unsigned int irq);
+
+#endif	/* __ASM_I8259_H__ */
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
new file mode 100644
index 0000000..859ebf4
--- /dev/null
+++ b/include/asm-i386/ide.h
@@ -0,0 +1,69 @@
+/*
+ *  linux/include/asm-i386/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+/*
+ *  This file contains the i386 architecture specific IDE code.
+ */
+
+#ifndef __ASMi386_IDE_H
+#define __ASMi386_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+# ifdef CONFIG_BLK_DEV_IDEPCI
+#define MAX_HWIFS	10
+# else
+#define MAX_HWIFS	6
+# endif
+#endif
+
+#define IDE_ARCH_OBSOLETE_DEFAULTS
+
+static __inline__ int ide_default_irq(unsigned long base)
+{
+	switch (base) {
+		case 0x1f0: return 14;
+		case 0x170: return 15;
+		case 0x1e8: return 11;
+		case 0x168: return 10;
+		case 0x1e0: return 8;
+		case 0x160: return 12;
+		default:
+			return 0;
+	}
+}
+
+static __inline__ unsigned long ide_default_io_base(int index)
+{
+	switch (index) {
+		case 0:	return 0x1f0;
+		case 1:	return 0x170;
+		case 2: return 0x1e8;
+		case 3: return 0x168;
+		case 4: return 0x1e0;
+		case 5: return 0x160;
+		default:
+			return 0;
+	}
+}
+
+#define IDE_ARCH_OBSOLETE_INIT
+#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
+
+#ifdef CONFIG_BLK_DEV_IDEPCI
+#define ide_init_default_irq(base)	(0)
+#else
+#define ide_init_default_irq(base)	ide_default_irq(base)
+#endif
+
+#include <asm-generic/ide_iops.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASMi386_IDE_H */
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
new file mode 100644
index 0000000..7babb97
--- /dev/null
+++ b/include/asm-i386/io.h
@@ -0,0 +1,381 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) to avoid writing the same thing
+ * over and over again with slight variations, possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ *		Linus
+ */
+
+ /*
+  *  Simplified a bit and optimized by Jan Hubicka
+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+  *
+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+  *  isa_read[wl] and isa_write[wl] fixed
+  *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+  */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
+
+#ifdef __KERNEL__
+
+#include <asm-generic/iomap.h>
+
+#include <linux/vmalloc.h>
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+/**
+ *	virt_to_phys	-	map virtual addresses to physical
+ *	@address: address to remap
+ *
+ *	The returned physical address is the physical (CPU) mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses directly mapped or allocated via kmalloc. 
+ *
+ *	This function does not give bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function
+ */
+ 
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+	return __pa(address);
+}
+
+/**
+ *	phys_to_virt	-	map physical address to virtual
+ *	@address: address to remap
+ *
+ *	The returned virtual address is a current CPU mapping for
+ *	the memory address given. It is only valid to use this function on
+ *	addresses that have a kernel mapping
+ *
+ *	This function does not handle bus mappings for DMA transfers. In
+ *	almost all conceivable cases a device driver should not be using
+ *	this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+	return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address. 
+ */
+
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap(offset, size, 0);
+}
+
+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned char __force *) addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *) addr;
+}
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+	*(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+	*(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+	*(volatile unsigned int __force *) addr = b;
+}
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define mmiowb()
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+	__memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+	__memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite close):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require memory-IO specific functions.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ *	check_signature		-	find BIOS signatures
+ *	@io_addr: mmio address to check 
+ *	@signature:  signature block
+ *	@length: length of signature
+ *
+ *	Perform a signature comparison with the mmio address io_addr. This
+ *	address should have been obtained by ioremap.
+ *	Returns 1 on a match.
+ */
+ 
+static inline int check_signature(volatile void __iomem * io_addr,
+	const unsigned char *signature, int length)
+{
+	int retval = 0;
+	do {
+		if (readb(io_addr) != *signature)
+			goto out;
+		io_addr++;
+		signature++;
+		length--;
+	} while (length);
+	retval = 1;
+out:
+	return retval;
+}
+
+/*
+ *	Cache management
+ *
+ *	This is needed for two cases:
+ *	1. Out-of-order aware processors
+ *	2. Accidentally out-of-order processors (PPro errata #51)
+ */
+ 
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size)		flush_write_buffers()
+#define dma_cache_wback(_start,_size)		flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size)		do { } while (0)
+#define dma_cache_wback(_start,_size)		do { } while (0)
+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#else
+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
+#endif
+
+static inline void slow_down_io(void) {
+	__asm__ __volatile__(
+		__SLOW_DOWN_IO
+#ifdef REALLY_SLOW_IO
+		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+		: : );
+}
+
+#ifdef CONFIG_X86_NUMAQ
+extern void *xquad_portio;    /* Where the IO area was mapped */
+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
+	if (xquad_portio) \
+		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
+	else \
+		out##bwl##_local(value, port); \
+} \
+static inline void out##bwl(unsigned type value, int port) { \
+	out##bwl##_quad(value, port, 0); \
+} \
+static inline unsigned type in##bwl##_quad(int port, int quad) { \
+	if (xquad_portio) \
+		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
+	else \
+		return in##bwl##_local(port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+	return in##bwl##_quad(port, 0); \
+}
+#else
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl(unsigned type value, int port) { \
+	out##bwl##_local(value, port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+	return in##bwl##_local(port); \
+}
+#endif
+
+
+#define BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+	unsigned type value; \
+	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
+	return value; \
+} \
+static inline void out##bwl##_local_p(unsigned type value, int port) { \
+	out##bwl##_local(value, port); \
+	slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_local_p(int port) { \
+	unsigned type value = in##bwl##_local(port); \
+	slow_down_io(); \
+	return value; \
+} \
+__BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_p(unsigned type value, int port) { \
+	out##bwl(value, port); \
+	slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_p(int port) { \
+	unsigned type value = in##bwl(port); \
+	slow_down_io(); \
+	return value; \
+} \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
+	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
+	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+}
+
+BUILDIO(b,b,char)
+BUILDIO(w,w,short)
+BUILDIO(l,,int)
+
+#endif
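A typical consumer of these helpers, for illustration only (base address and register offsets are made up): map a device's MMIO window, access it through the __iomem accessors, then unmap:

#include <linux/errno.h>
#include <asm/io.h>

static int demo_probe(void)
{
	void __iomem *regs = ioremap(0xfebf0000, 4096);	/* hypothetical BAR */
	unsigned int id;

	if (!regs)
		return -ENOMEM;

	id = readl(regs);		/* hypothetical ID register at 0x00 */
	writel(1, regs + 4);		/* hypothetical enable bit at 0x04 */

	iounmap(regs);
	return id ? 0 : -ENODEV;
}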
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
new file mode 100644
index 0000000..002c203
--- /dev/null
+++ b/include/asm-i386/io_apic.h
@@ -0,0 +1,213 @@
+#ifndef __ASM_IO_APIC_H
+#define __ASM_IO_APIC_H
+
+#include <linux/config.h>
+#include <asm/types.h>
+#include <asm/mpspec.h>
+
+/*
+ * Intel IO-APIC support for SMP and UP systems.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
+ */
+
+#ifdef CONFIG_X86_IO_APIC
+
+#ifdef CONFIG_PCI_MSI
+static inline int use_pci_vector(void)	{return 1;}
+static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+static inline void end_edge_ioapic_vector (unsigned int vector) { }
+#define startup_level_ioapic	startup_level_ioapic_vector
+#define shutdown_level_ioapic	mask_IO_APIC_vector
+#define enable_level_ioapic	unmask_IO_APIC_vector
+#define disable_level_ioapic	mask_IO_APIC_vector
+#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
+#define end_level_ioapic	end_level_ioapic_vector
+#define set_ioapic_affinity	set_ioapic_affinity_vector
+
+#define startup_edge_ioapic 	startup_edge_ioapic_vector
+#define shutdown_edge_ioapic 	disable_edge_ioapic_vector
+#define enable_edge_ioapic 	unmask_IO_APIC_vector
+#define disable_edge_ioapic 	disable_edge_ioapic_vector
+#define ack_edge_ioapic 	ack_edge_ioapic_vector
+#define end_edge_ioapic 	end_edge_ioapic_vector
+#else
+static inline int use_pci_vector(void)	{return 0;}
+static inline void disable_edge_ioapic_irq(unsigned int irq) { }
+static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
+static inline void end_edge_ioapic_irq (unsigned int irq) { }
+#define startup_level_ioapic	startup_level_ioapic_irq
+#define shutdown_level_ioapic	mask_IO_APIC_irq
+#define enable_level_ioapic	unmask_IO_APIC_irq
+#define disable_level_ioapic	mask_IO_APIC_irq
+#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
+#define end_level_ioapic	end_level_ioapic_irq
+#define set_ioapic_affinity	set_ioapic_affinity_irq
+
+#define startup_edge_ioapic 	startup_edge_ioapic_irq
+#define shutdown_edge_ioapic 	disable_edge_ioapic_irq
+#define enable_edge_ioapic 	unmask_IO_APIC_irq
+#define disable_edge_ioapic 	disable_edge_ioapic_irq
+#define ack_edge_ioapic 	ack_edge_ioapic_irq
+#define end_edge_ioapic 	end_edge_ioapic_irq
+#endif
+
+#define IO_APIC_BASE(idx) \
+		((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
+		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
+
+/*
+ * The structure of the IO-APIC:
+ */
+union IO_APIC_reg_00 {
+	u32	raw;
+	struct {
+		u32	__reserved_2	: 14,
+			LTS		:  1,
+			delivery_type	:  1,
+			__reserved_1	:  8,
+			ID		:  8;
+	} __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_01 {
+	u32	raw;
+	struct {
+		u32	version		:  8,
+			__reserved_2	:  7,
+			PRQ		:  1,
+			entries		:  8,
+			__reserved_1	:  8;
+	} __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_02 {
+	u32	raw;
+	struct {
+		u32	__reserved_2	: 24,
+			arbitration	:  4,
+			__reserved_1	:  4;
+	} __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_03 {
+	u32	raw;
+	struct {
+		u32	boot_DT		:  1,
+			__reserved_1	: 31;
+	} __attribute__ ((packed)) bits;
+};
+
+/*
+ * # of IO-APICs and # of IRQ routing registers
+ */
+extern int nr_ioapics;
+extern int nr_ioapic_registers[MAX_IO_APICS];
+
+enum ioapic_irq_destination_types {
+	dest_Fixed = 0,
+	dest_LowestPrio = 1,
+	dest_SMI = 2,
+	dest__reserved_1 = 3,
+	dest_NMI = 4,
+	dest_INIT = 5,
+	dest__reserved_2 = 6,
+	dest_ExtINT = 7
+};
+
+struct IO_APIC_route_entry {
+	__u32	vector		:  8,
+		delivery_mode	:  3,	/* 000: FIXED
+					 * 001: lowest prio
+					 * 111: ExtINT
+					 */
+		dest_mode	:  1,	/* 0: physical, 1: logical */
+		delivery_status	:  1,
+		polarity	:  1,
+		irr		:  1,
+		trigger		:  1,	/* 0: edge, 1: level */
+		mask		:  1,	/* 0: enabled, 1: disabled */
+		__reserved_2	: 15;
+
+	union {		struct { __u32
+					__reserved_1	: 24,
+					physical_dest	:  4,
+					__reserved_2	:  4;
+			} physical;
+
+			struct { __u32
+					__reserved_1	: 24,
+					logical_dest	:  8;
+			} logical;
+	} dest;
+
+} __attribute__ ((packed));
+
+/*
+ * MP-BIOS irq configuration table structures:
+ */
+
+/* I/O APIC entries */
+extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
+
+/* # of MP IRQ source entries */
+extern int mp_irq_entries;
+
+/* MP IRQ source entries */
+extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* non-0 if default (table-less) MP configuration */
+extern int mpc_default_type;
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	*IO_APIC_BASE(apic) = reg;
+	return *(IO_APIC_BASE(apic)+4);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	*IO_APIC_BASE(apic) = reg;
+	*(IO_APIC_BASE(apic)+4) = value;
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ *
+ * Older SiS APICs require that we rewrite the index register.
+ */
+extern int sis_apic_bug;
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	if (sis_apic_bug)
+		*IO_APIC_BASE(apic) = reg;
+	*(IO_APIC_BASE(apic)+4) = value;
+}
+
+/* 1 if "noapic" boot option passed */
+extern int skip_ioapic_setup;
+
+/*
+ * If we use the IO-APIC for IRQ routing, disable automatic
+ * assignment of PCI IRQ's.
+ */
+#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
+
+#ifdef CONFIG_ACPI_BOOT
+extern int io_apic_get_unique_id (int ioapic, int apic_id);
+extern int io_apic_get_version (int ioapic);
+extern int io_apic_get_redir_entries (int ioapic);
+extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low);
+#endif /*CONFIG_ACPI_BOOT*/
+
+extern int (*ioapic_renumber_irq)(int ioapic, int irq);
+
+#else  /* !CONFIG_X86_IO_APIC */
+#define io_apic_assign_pci_irqs 0
+#endif
+
+extern int assign_irq_vector(int irq);
+
+#endif
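A sketch of reading through the index/data window: write the register index, read the data word, and let union IO_APIC_reg_01 pick the fields apart instead of shifting by hand:

static int ioapic_version(int apic)
{
	union IO_APIC_reg_01 reg_01;

	reg_01.raw = io_apic_read(apic, 1);	/* register 1: version */
	/* reg_01.bits.entries + 1 is the number of redirection entries */
	return reg_01.bits.version;
}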
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h
new file mode 100644
index 0000000..543f784
--- /dev/null
+++ b/include/asm-i386/ioctl.h
@@ -0,0 +1,85 @@
+/* $Id: ioctl.h,v 1.5 1993/07/19 21:53:50 root Exp root $
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ */
+
+#ifndef _ASMI386_IOCTL_H
+#define _ASMI386_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms.  The i386 ioctl numbering scheme doesn't really enforce
+ * a type field.  De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here.  Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS	8
+#define _IOC_TYPEBITS	8
+#define _IOC_SIZEBITS	14
+#define _IOC_DIRBITS	2
+
+#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT	0
+#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE	0U
+#define _IOC_WRITE	1U
+#define _IOC_READ	2U
+
+#define _IOC(dir,type,nr,size) \
+	(((dir)  << _IOC_DIRSHIFT) | \
+	 ((type) << _IOC_TYPESHIFT) | \
+	 ((nr)   << _IOC_NRSHIFT) | \
+	 ((size) << _IOC_SIZESHIFT))
+
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+	((sizeof(t) == sizeof(t[1]) && \
+	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+	  sizeof(t) : __invalid_size_argument_for_IOC)
+
+/* used to create numbers */
+#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOR_BAD(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW_BAD(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR_BAD(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
+
+#endif /* _ASMI386_IOCTL_H */
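A worked example for a hypothetical driver using magic number 'E': _IOR()/_IOW() pack direction, magic, command number and argument size into one 32-bit value, and the _IOC_* macros take it apart again in the handler:

#define EX_MAGIC	'E'				/* hypothetical magic */
#define EX_GETCOUNT	_IOR(EX_MAGIC, 0x01, int)	/* driver fills in an int */
#define EX_SETCOUNT	_IOW(EX_MAGIC, 0x02, int)	/* driver reads an int */

/*
 * EX_GETCOUNT then decodes as:
 *	_IOC_DIR(cmd)  == _IOC_READ
 *	_IOC_TYPE(cmd) == 'E'
 *	_IOC_NR(cmd)   == 0x01
 *	_IOC_SIZE(cmd) == sizeof(int)
 */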
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
new file mode 100644
index 0000000..f962fad
--- /dev/null
+++ b/include/asm-i386/ioctls.h
@@ -0,0 +1,83 @@
+#ifndef __ARCH_I386_IOCTLS_H__
+#define __ARCH_I386_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS		0x5401
+#define TCSETS		0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
+#define TCSETSW		0x5403
+#define TCSETSF		0x5404
+#define TCGETA		0x5405
+#define TCSETA		0x5406
+#define TCSETAW		0x5407
+#define TCSETAF		0x5408
+#define TCSBRK		0x5409
+#define TCXONC		0x540A
+#define TCFLSH		0x540B
+#define TIOCEXCL	0x540C
+#define TIOCNXCL	0x540D
+#define TIOCSCTTY	0x540E
+#define TIOCGPGRP	0x540F
+#define TIOCSPGRP	0x5410
+#define TIOCOUTQ	0x5411
+#define TIOCSTI		0x5412
+#define TIOCGWINSZ	0x5413
+#define TIOCSWINSZ	0x5414
+#define TIOCMGET	0x5415
+#define TIOCMBIS	0x5416
+#define TIOCMBIC	0x5417
+#define TIOCMSET	0x5418
+#define TIOCGSOFTCAR	0x5419
+#define TIOCSSOFTCAR	0x541A
+#define FIONREAD	0x541B
+#define TIOCINQ		FIONREAD
+#define TIOCLINUX	0x541C
+#define TIOCCONS	0x541D
+#define TIOCGSERIAL	0x541E
+#define TIOCSSERIAL	0x541F
+#define TIOCPKT		0x5420
+#define FIONBIO		0x5421
+#define TIOCNOTTY	0x5422
+#define TIOCSETD	0x5423
+#define TIOCGETD	0x5424
+#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
+/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
+#define TIOCSBRK	0x5427  /* BSD compatibility */
+#define TIOCCBRK	0x5428  /* BSD compatibility */
+#define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+
+#define FIONCLEX	0x5450
+#define FIOCLEX		0x5451
+#define FIOASYNC	0x5452
+#define TIOCSERCONFIG	0x5453
+#define TIOCSERGWILD	0x5454
+#define TIOCSERSWILD	0x5455
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
+#define TIOCSERGETLSR   0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+#define TIOCGHAYESESP   0x545E  /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP   0x545F  /* Set Hayes ESP configuration */
+#define FIOQSIZE	0x5460
+
+/* Used for packet mode */
+#define TIOCPKT_DATA		 0
+#define TIOCPKT_FLUSHREAD	 1
+#define TIOCPKT_FLUSHWRITE	 2
+#define TIOCPKT_STOP		 4
+#define TIOCPKT_START		 8
+#define TIOCPKT_NOSTOP		16
+#define TIOCPKT_DOSTOP		32
+
+#define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
+
+#endif
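From the other side of the boundary, a minimal userspace consumer of one of these numbers (TIOCGWINSZ, the window-size query):

#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(0, TIOCGWINSZ, &ws) == 0)	/* fd 0: the controlling tty */
		printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
	return 0;
}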
diff --git a/include/asm-i386/ipc.h b/include/asm-i386/ipc.h
new file mode 100644
index 0000000..a46e3d9
--- /dev/null
+++ b/include/asm-i386/ipc.h
@@ -0,0 +1 @@
+#include <asm-generic/ipc.h>
diff --git a/include/asm-i386/ipcbuf.h b/include/asm-i386/ipcbuf.h
new file mode 100644
index 0000000..0dcad4f
--- /dev/null
+++ b/include/asm-i386/ipcbuf.h
@@ -0,0 +1,29 @@
+#ifndef __i386_IPCBUF_H__
+#define __i386_IPCBUF_H__
+
+/*
+ * The ipc64_perm structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t		key;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
+	__kernel_mode_t		mode;
+	unsigned short		__pad1;
+	unsigned short		seq;
+	unsigned short		__pad2;
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+#endif /* __i386_IPCBUF_H__ */
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
new file mode 100644
index 0000000..05b9e61
--- /dev/null
+++ b/include/asm-i386/irq.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ *	linux/include/asm/irq.h
+ *
+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ *	IRQ/IPI changes taken from work by Thomas Radke
+ *	<tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+/* include comes from machine specific directory */
+#include "irq_vectors.h"
+#include <asm/thread_info.h>
+
+static __inline__ int irq_canonicalize(int irq)
+{
+	return ((irq == 2) ? 9 : irq);
+}
+
+extern void release_vm86_irqs(struct task_struct *);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
+#endif
+
+#ifdef CONFIG_4KSTACKS
+  extern void irq_ctx_init(int cpu);
+# define __ARCH_HAS_DO_SOFTIRQ
+#else
+# define irq_ctx_init(cpu) do { } while (0)
+#endif
+
+#ifdef CONFIG_IRQBALANCE
+extern int irqbalance_disable(char *str);
+#endif
+
+#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h
new file mode 100644
index 0000000..d13d1e6
--- /dev/null
+++ b/include/asm-i386/ist.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_IST_H
+#define _ASM_IST_H
+
+/*
+ * Include file for the interface to IST BIOS
+ * Copyright 2002 Andy Grover <andrew.grover@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+
+#ifdef __KERNEL__
+
+struct ist_info {
+	unsigned long	signature;
+	unsigned long	command;
+	unsigned long	event;
+	unsigned long	perf_level;
+};
+
+extern struct ist_info ist_info;
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_IST_H */
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
new file mode 100644
index 0000000..de6498b
--- /dev/null
+++ b/include/asm-i386/kdebug.h
@@ -0,0 +1,50 @@
+#ifndef _I386_KDEBUG_H
+#define _I386_KDEBUG_H 1
+
+/*
+ * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com>
+ * from x86_64 architecture.
+ */
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+	struct pt_regs *regs;
+	const char *str;
+	long err;
+	int trapnr;
+	int signr;
+};
+
+/* Note - you should never unregister, because that can race with NMIs.
+   If you really want to do it: first unregister, then synchronize_kernel(),
+   then free. */
+int register_die_notifier(struct notifier_block *nb);
+extern struct notifier_block *i386die_chain;
+
+
+/* Grossly misnamed. */
+enum die_val {
+	DIE_OOPS = 1,
+	DIE_INT3,
+	DIE_DEBUG,
+	DIE_PANIC,
+	DIE_NMI,
+	DIE_DIE,
+	DIE_NMIWATCHDOG,
+	DIE_KERNELDEBUG,
+	DIE_TRAP,
+	DIE_GPF,
+	DIE_CALL,
+	DIE_NMI_IPI,
+	DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
+{
+	struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
+	return notifier_call_chain(&i386die_chain, val, &args);
+}
+
+#endif
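A sketch of a client of this chain (names hypothetical): register once, filter on the die_val of interest, and, per the comment above, never unregister without synchronizing first:

static int my_die_handler(struct notifier_block *nb,
			  unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk("oops: %s, eip %08lx\n", args->str, args->regs->eip);
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

/* in init code: register_die_notifier(&my_die_nb); */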
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
new file mode 100644
index 0000000..6886a0c
--- /dev/null
+++ b/include/asm-i386/kmap_types.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define D(n) __KM_FENCE_##n ,
+#else
+# define D(n)
+#endif
+
+enum km_type {
+D(0)	KM_BOUNCE_READ,
+D(1)	KM_SKB_SUNRPC_DATA,
+D(2)	KM_SKB_DATA_SOFTIRQ,
+D(3)	KM_USER0,
+D(4)	KM_USER1,
+D(5)	KM_BIO_SRC_IRQ,
+D(6)	KM_BIO_DST_IRQ,
+D(7)	KM_PTE0,
+D(8)	KM_PTE1,
+D(9)	KM_IRQ0,
+D(10)	KM_IRQ1,
+D(11)	KM_SOFTIRQ0,
+D(12)	KM_SOFTIRQ1,
+D(13)	KM_TYPE_NR
+};
+
+#undef D
+
+#endif
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
new file mode 100644
index 0000000..4092f68
--- /dev/null
+++ b/include/asm-i386/kprobes.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+/*
+ *  Kernel Probes (KProbes)
+ *  include/asm-i386/kprobes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *		Probes initial implementation ( includes suggestions from
+ *		Rusty Russell).
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct pt_regs;
+
+typedef u8 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION	0xcc
+#define MAX_INSN_SIZE 16
+#define MAX_STACK_SIZE 64
+#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+	? (MAX_STACK_SIZE) \
+	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+
+#define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
+
+/* Architecture specific copy of original instruction*/
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
+};
+
+
+/* traps 3/1 are interrupt gates for kprobes, so restore the state of IF,
+ * if necessary, before executing the original int3/1 (trap) handler.
+ */
+static inline void restore_interrupts(struct pt_regs *regs)
+{
+	if (regs->eflags & IF_MASK)
+		local_irq_enable();
+}
+
+#ifdef CONFIG_KPROBES
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+#else				/* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+					   unsigned long val, void *data)
+{
+	return 0;
+}
+#endif
+#endif				/* _ASM_KPROBES_H */
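For illustration, the usual registration pattern (the target symbol is an arbitrary example): register_kprobe() copies the first instruction into arch_specific_insn and plants BREAKPOINT_INSTRUCTION (int3) at the probed address:

#include <linux/kprobes.h>

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk("kprobe hit at %p, eflags %08lx\n", p->addr, regs->eflags);
	return 0;	/* 0: continue with single-stepping the copy */
}

static struct kprobe kp = {
	.addr = (kprobe_opcode_t *)do_fork,	/* example target symbol */
	.pre_handler = pre_handler,
};

/* module init: register_kprobe(&kp);  module exit: unregister_kprobe(&kp); */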
diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h
new file mode 100644
index 0000000..e9d3de1
--- /dev/null
+++ b/include/asm-i386/ldt.h
@@ -0,0 +1,32 @@
+/*
+ * ldt.h
+ *
+ * Definitions of structures used with the modify_ldt system call.
+ */
+#ifndef _LINUX_LDT_H
+#define _LINUX_LDT_H
+
+/* Maximum number of LDT entries supported. */
+#define LDT_ENTRIES	8192
+/* The size of each LDT entry. */
+#define LDT_ENTRY_SIZE	8
+
+#ifndef __ASSEMBLY__
+struct user_desc {
+	unsigned int  entry_number;
+	unsigned long base_addr;
+	unsigned int  limit;
+	unsigned int  seg_32bit:1;
+	unsigned int  contents:2;
+	unsigned int  read_exec_only:1;
+	unsigned int  limit_in_pages:1;
+	unsigned int  seg_not_present:1;
+	unsigned int  useable:1;
+};
+
+#define MODIFY_LDT_CONTENTS_DATA	0
+#define MODIFY_LDT_CONTENTS_STACK	1
+#define MODIFY_LDT_CONTENTS_CODE	2
+
+#endif /* !__ASSEMBLY__ */
+#endif
diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h
new file mode 100644
index 0000000..af3d857
--- /dev/null
+++ b/include/asm-i386/linkage.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
+#define FASTCALL(x)	x __attribute__((regparm(3)))
+#define fastcall	__attribute__((regparm(3)))
+
+#ifdef CONFIG_REGPARM
+# define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
+#endif
+
+#ifdef CONFIG_X86_ALIGNMENT_16
+#define __ALIGN .align 16,0x90
+#define __ALIGN_STR ".align 16,0x90"
+#endif
+
+#endif
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
new file mode 100644
index 0000000..0177da8
--- /dev/null
+++ b/include/asm-i386/local.h
@@ -0,0 +1,70 @@
+#ifndef _ARCH_I386_LOCAL_H
+#define _ARCH_I386_LOCAL_H
+
+#include <linux/percpu.h>
+
+typedef struct
+{
+	volatile unsigned long counter;
+} local_t;
+
+#define LOCAL_INIT(i)	{ (i) }
+
+#define local_read(v)	((v)->counter)
+#define local_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void local_inc(local_t *v)
+{
+	__asm__ __volatile__(
+		"incl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+static __inline__ void local_dec(local_t *v)
+{
+	__asm__ __volatile__(
+		"decl %0"
+		:"=m" (v->counter)
+		:"m" (v->counter));
+}
+
+static __inline__ void local_add(unsigned long i, local_t *v)
+{
+	__asm__ __volatile__(
+		"addl %1,%0"
+		:"=m" (v->counter)
+		:"ir" (i), "m" (v->counter));
+}
+
+static __inline__ void local_sub(unsigned long i, local_t *v)
+{
+	__asm__ __volatile__(
+		"subl %1,%0"
+		:"=m" (v->counter)
+		:"ir" (i), "m" (v->counter));
+}
+
+/* On x86, these are no better than the atomic variants. */
+#define __local_inc(l)		local_inc(l)
+#define __local_dec(l)		local_dec(l)
+#define __local_add(i,l)	local_add((i),(l))
+#define __local_sub(i,l)	local_sub((i),(l))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations.  Note they take
+ * a variable, not an address.
+ */
+#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
+#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+#define __cpu_local_inc(v)	cpu_local_inc(v)
+#define __cpu_local_dec(v)	cpu_local_dec(v)
+#define __cpu_local_add(i, v)	cpu_local_add((i), (v))
+#define __cpu_local_sub(i, v)	cpu_local_sub((i), (v))
+
+#endif /* _ARCH_I386_LOCAL_H */
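A sketch of the intended use: a per-cpu event counter bumped on the local CPU without the locked bus cycle an atomic_t would cost (variable names hypothetical):

#include <linux/percpu.h>

static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void note_hit(void)
{
	cpu_local_inc(hits);	/* caller keeps preemption disabled */
}

static unsigned long total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += local_read(&per_cpu(hits, cpu));
	return sum;
}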
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
new file mode 100644
index 0000000..2339868
--- /dev/null
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -0,0 +1,167 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+#include <asm/smp.h>
+
+#define SEQUENTIAL_APICID
+#ifdef SEQUENTIAL_APICID
+#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
+		((phys_apic<<2) & (~0xf)) )
+#elif CLUSTERED_APICID
+#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
+		((phys_apic) & (~0xf)) )
+#endif
+
+#define NO_BALANCE_IRQ (1)
+#define esr_disable (1)
+
+#define NO_IOAPIC_CHECK (0)
+
+static inline int apic_id_registered(void)
+{
+	return (1);
+}
+
+#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
+/* Round-robin the irqs among the online cpus */
+static inline cpumask_t target_cpus(void)
+{ 
+	static unsigned long cpu = NR_CPUS;
+	do {
+		if (cpu >= NR_CPUS)
+			cpu = first_cpu(cpu_online_map);
+		else
+			cpu = next_cpu(cpu, cpu_online_map);
+	} while (cpu >= NR_CPUS);
+	return cpumask_of_cpu(cpu);
+}
+#define TARGET_CPUS	(target_cpus())
+
+#define INT_DELIVERY_MODE dest_Fixed
+#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
+
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return 0;
+}
+
+/* we don't use the phys_cpu_present_map to indicate apicid presence */
+static inline unsigned long check_apicid_present(int bit) 
+{
+	return 1;
+}
+
+#define apicid_cluster(apicid) (apicid & 0xF0)
+
+static inline unsigned long calculate_ldr(unsigned long old)
+{
+	unsigned long id;
+	id = xapic_phys_to_log_apicid(hard_smp_processor_id());
+	return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends setting DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void init_apic_ldr(void)
+{
+	unsigned long val;
+
+	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val = calculate_ldr(val);
+	apic_write_around(APIC_LDR, val);
+}
+
+static inline void clustered_apic_check(void)
+{
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+		"Cluster", nr_ioapics);
+}
+
+static inline int multi_timer_check(int apic, int irq)
+{
+	return 0;
+}
+
+static inline int apicid_to_node(int logical_apicid)
+{
+	return 0;
+}
+
+extern u8 bios_cpu_apicid[];
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < NR_CPUS)
+		return (int)bios_cpu_apicid[mps_cpu];
+	else
+		return BAD_APICID;
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
+{
+	return physid_mask_of_physid(phys_apicid);
+}
+
+extern u8 cpu_2_logical_apicid[];
+/* Mapping from cpu number to logical apicid */
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	if (cpu >= NR_CPUS)
+		return BAD_APICID;
+	return (int)cpu_2_logical_apicid[cpu];
+}
+
+static inline int mpc_apic_id(struct mpc_config_processor *m,
+			struct mpc_config_translation *translation_record)
+{
+	printk("Processor #%d %ld:%ld APIC version %d\n",
+	        m->mpc_apicid,
+	        (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+	        (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+	        m->mpc_apicver);
+	return m->mpc_apicid;
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	/* For clustered we don't have a good way to do this yet - hack */
+	return physids_promote(0xFUL);
+}
+
+#define WAKE_SECONDARY_VIA_INIT
+
+static inline void setup_portio_remap(void)
+{
+}
+
+static inline void enable_apic_mode(void)
+{
+}
+
+static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return (1);
+}
+
+/* Since we are using a single CPU as the destination, pick only one CPU here */
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	int cpu;
+	int apicid;	
+
+	cpu = first_cpu(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	return apicid;
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h
new file mode 100644
index 0000000..23e58b3
--- /dev/null
+++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define		APIC_ID_MASK		(0x0F<<24)
+
+static inline unsigned get_apic_id(unsigned long x) 
+{ 
+	return (((x)>>24)&0x0F);
+} 
+
+#define		GET_APIC_ID(x)	get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-bigsmp/mach_ipi.h b/include/asm-i386/mach-bigsmp/mach_ipi.h
new file mode 100644
index 0000000..9404c53
--- /dev/null
+++ b/include/asm-i386/mach-bigsmp/mach_ipi.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector);
+
+static inline void send_IPI_mask(cpumask_t mask, int vector)
+{
+	send_IPI_mask_sequence(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
+
+	if (!cpus_empty(mask))
+		send_IPI_mask(mask, vector);
+}
+
+static inline void send_IPI_all(int vector)
+{
+	send_IPI_mask(cpu_online_map, vector);
+}
+
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_mpspec.h b/include/asm-i386/mach-bigsmp/mach_mpspec.h
new file mode 100644
index 0000000..6b5dadc
--- /dev/null
+++ b/include/asm-i386/mach-bigsmp/mach_mpspec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+#define MAX_MP_BUSSES 32
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-default/apm.h b/include/asm-i386/mach-default/apm.h
new file mode 100644
index 0000000..1f730b8
--- /dev/null
+++ b/include/asm-i386/mach-default/apm.h
@@ -0,0 +1,75 @@
+/*
+ *  include/asm-i386/mach-default/apm.h
+ *
+ *  Machine specific APM BIOS functions for generic.
+ *  Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+
+#ifndef _ASM_APM_H
+#define _ASM_APM_H
+
+#ifdef APM_ZERO_SEGS
+#	define APM_DO_ZERO_SEGS \
+		"pushl %%ds\n\t" \
+		"pushl %%es\n\t" \
+		"xorl %%edx, %%edx\n\t" \
+		"mov %%dx, %%ds\n\t" \
+		"mov %%dx, %%es\n\t" \
+		"mov %%dx, %%fs\n\t" \
+		"mov %%dx, %%gs\n\t"
+#	define APM_DO_POP_SEGS \
+		"popl %%es\n\t" \
+		"popl %%ds\n\t"
+#else
+#	define APM_DO_ZERO_SEGS
+#	define APM_DO_POP_SEGS
+#endif
+
+static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+					u32 *eax, u32 *ebx, u32 *ecx,
+					u32 *edx, u32 *esi)
+{
+	/*
+	 * N.B. We do NOT need a cld after the BIOS call
+	 * because we always save and restore the flags.
+	 */
+	__asm__ __volatile__(APM_DO_ZERO_SEGS
+		"pushl %%edi\n\t"
+		"pushl %%ebp\n\t"
+		"lcall *%%cs:apm_bios_entry\n\t"
+		"setc %%al\n\t"
+		"popl %%ebp\n\t"
+		"popl %%edi\n\t"
+		APM_DO_POP_SEGS
+		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
+		  "=S" (*esi)
+		: "a" (func), "b" (ebx_in), "c" (ecx_in)
+		: "memory", "cc");
+}
+
+static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+						u32 ecx_in, u32 *eax)
+{
+	int	cx, dx, si;
+	u8	error;
+
+	/*
+	 * N.B. We do NOT need a cld after the BIOS call
+	 * because we always save and restore the flags.
+	 */
+	__asm__ __volatile__(APM_DO_ZERO_SEGS
+		"pushl %%edi\n\t"
+		"pushl %%ebp\n\t"
+		"lcall *%%cs:apm_bios_entry\n\t"
+		"setc %%bl\n\t"
+		"popl %%ebp\n\t"
+		"popl %%edi\n\t"
+		APM_DO_POP_SEGS
+		: "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
+		  "=S" (si)
+		: "a" (func), "b" (ebx_in), "c" (ecx_in)
+		: "memory", "cc");
+	return error;
+}
+
+#endif /* _ASM_APM_H */
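For callers: the function code goes in %eax, the arguments in %ebx/%ecx, and
the BIOS reports failure via the carry flag, which the setc above turns into
a nonzero error byte (the APM error code itself comes back in %ah).  A hedged
usage sketch; APM_FUNC_VERSION is a placeholder name for illustration, the
real callers and function codes live in arch/i386/kernel/apm.c:

	u32 eax;
	u8 err;

	err = apm_bios_call_simple_asm(APM_FUNC_VERSION, 0, 0, &eax);
	if (err)	/* nonzero iff the BIOS set the carry flag */
		printk("APM call failed, error code 0x%02x\n",
		       (eax >> 8) & 0xff);	/* error code from %ah */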
diff --git a/include/asm-i386/mach-default/bios_ebda.h b/include/asm-i386/mach-default/bios_ebda.h
new file mode 100644
index 0000000..9cbd9a6
--- /dev/null
+++ b/include/asm-i386/mach-default/bios_ebda.h
@@ -0,0 +1,15 @@
+#ifndef _MACH_BIOS_EBDA_H
+#define _MACH_BIOS_EBDA_H
+
+/*
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E.
+ */
+static inline unsigned int get_bios_ebda(void)
+{
+	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
+	address <<= 4;
+	return address;	/* 0 means none */
+}
+
+#endif /* _MACH_BIOS_EBDA_H */
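To make the segment arithmetic concrete: the word at 0x40E is a real-mode
segment, and shifting it left by four yields the linear address.  A sketch
with an assumed (not guaranteed) BIOS value:

	unsigned short ebda_seg = 0x9FC0;	/* example value read from 0x40E */
	unsigned int ebda = (unsigned int)ebda_seg << 4;	/* 0x9FC00, just below 640K */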
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
new file mode 100644
index 0000000..03dd13a
--- /dev/null
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -0,0 +1,85 @@
+/* defines for inline arch setup functions */
+
+#include <asm/apic.h>
+
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ * @regs:	standard registers from interrupt
+ *
+ * Description:
+ *	This hook is called immediately after the timer interrupt is ack'd.
+ *	Its primary purpose is to allow architectures that don't possess
+ *	individual per-CPU clocks (like those the CPU APICs supply) to
+ *	broadcast the timer interrupt as a means of triggering reschedules etc.
+ **/
+
+static inline void do_timer_interrupt_hook(struct pt_regs *regs)
+{
+	do_timer(regs);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(regs));
+#endif
+/*
+ * In the SMP case we use the local APIC timer interrupt to do the
+ * profiling, except when we simulate SMP mode on a uniprocessor
+ * system, in that case we have to call the local interrupt handler.
+ */
+#ifndef CONFIG_X86_LOCAL_APIC
+	profile_tick(CPU_PROFILING, regs);
+#else
+	if (!using_apic_timer)
+		smp_local_timer_interrupt(regs);
+#endif
+}
+
+
+/* you can safely undefine this if you don't have the Neptune chipset */
+
+#define BUGGY_NEPTUN_TIMER
+
+/**
+ * do_timer_overflow - process a detected timer overflow condition
+ * @count:	hardware timer interrupt count on overflow
+ *
+ * Description:
+ *	This call is invoked when the jiffies count has not incremented but
+ *	the hardware timer interrupt count has.  It means that a timer tick
+ *	interrupt came along while the previous one was still pending, so a
+ *	tick was missed.
+ **/
+static inline int do_timer_overflow(int count)
+{
+	int i;
+
+	spin_lock(&i8259A_lock);
+	/*
+	 * This is tricky when I/O APICs are used;
+	 * see do_timer_interrupt().
+	 */
+	i = inb(0x20);
+	spin_unlock(&i8259A_lock);
+	
+	/* assumption about timer being IRQ0 */
+	if (i & 0x01) {
+		/*
+		 * We cannot detect lost timer interrupts ... 
+		 * well, that's why we call them lost, don't we? :)
+		 * [hmm, on the Pentium and Alpha we can ... sort of]
+		 */
+		count -= LATCH;
+	} else {
+#ifdef BUGGY_NEPTUN_TIMER
+		/*
+		 * for the Neptune bug we know that the 'latch'
+		 * command doesn't latch the high and low value
+		 * of the counter atomically. Thus we have to
+		 * subtract 256 from the counter
+		 * ... funny, isn't it? :)
+		 */
+		
+		count -= 256;
+#else
+		printk("do_slow_gettimeoffset(): hardware timer problem?\n");
+#endif
+	}
+	return count;
+}
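A rough feel for the two adjustment sizes, assuming HZ=100 so LATCH is about
11932 PIT counts: if bit 0 of the byte read back from the master PIC shows
IRQ0 pending again, a whole tick went unseen and the latched count is
credited with one full tick; the Neptune latch bug can at worst glue a stale
high byte to a fresh low byte, a 256-count error, hence the fixed 256.

	/* sketch, HZ = 100 */
	int count = 11000;	/* latched PIT count, example value */
	count -= 11932;		/* LATCH: credit the missed tick; the offset
				 * arithmetic consuming 'count' copes with
				 * the negative value */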
diff --git a/include/asm-i386/mach-default/entry_arch.h b/include/asm-i386/mach-default/entry_arch.h
new file mode 100644
index 0000000..bc86146
--- /dev/null
+++ b/include/asm-i386/mach-default/entry_arch.h
@@ -0,0 +1,34 @@
+/*
+ * This file is designed to contain the BUILD_INTERRUPT specifications for
+ * all of the extra named interrupt vectors used by the architecture.
+ * Usually these are the Inter-Processor Interrupts (IPIs).
+ */
+
+/*
+ * The following vectors are part of the Linux architecture; there
+ * is no hardware IRQ pin equivalent for them.  They are triggered
+ * through the ICC by us (IPIs).
+ */
+#ifdef CONFIG_X86_SMP
+BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
+BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
+BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+#endif
+
+/*
+ * Every Pentium local APIC has two 'local interrupts', each with a
+ * soft-definable vector: one is a timer interrupt, the other is
+ * error counter overflow. Linux uses the local APIC timer interrupt
+ * to get a much simpler SMP time architecture:
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
+BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
+BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+
+#ifdef CONFIG_X86_MCE_P4THERMAL
+BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
+#endif
+
+#endif
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h
new file mode 100644
index 0000000..a96d9f6
--- /dev/null
+++ b/include/asm-i386/mach-default/io_ports.h
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/io_ports.h
+ *
+ *  Machine specific IO port address definition for generic.
+ *  Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_IO_PORTS_H
+#define _MACH_IO_PORTS_H
+
+/* i8253A PIT registers */
+#define PIT_MODE		0x43
+#define PIT_CH0			0x40
+#define PIT_CH2			0x42
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD		0x20
+#define PIC_MASTER_IMR		0x21
+#define PIC_MASTER_ISR		PIC_MASTER_CMD
+#define PIC_MASTER_POLL		PIC_MASTER_ISR
+#define PIC_MASTER_OCW3		PIC_MASTER_ISR
+#define PIC_SLAVE_CMD		0xa0
+#define PIC_SLAVE_IMR		0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR		2
+#define MASTER_ICW4_DEFAULT	0x01
+#define SLAVE_ICW4_DEFAULT	0x01
+#define PIC_ICW4_AEOI		2
+
+#endif /* !_MACH_IO_PORTS_H */
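These names mostly make PIC pokes elsewhere readable.  For instance, masking
a single IRQ line on the master 8259A is a read-modify-write of its IMR; a
hedged sketch (lines 8-15 would use PIC_SLAVE_IMR instead):

	unsigned char imr = inb(PIC_MASTER_IMR);
	outb(imr | (1 << irq), PIC_MASTER_IMR);	/* mask IRQ line 'irq' (0-7) */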
diff --git a/include/asm-i386/mach-default/irq_vectors.h b/include/asm-i386/mach-default/irq_vectors.h
new file mode 100644
index 0000000..881c63c
--- /dev/null
+++ b/include/asm-i386/mach-default/irq_vectors.h
@@ -0,0 +1,96 @@
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ *	FIRST_EXTERNAL_VECTOR:
+ *		The first free place for external interrupts
+ *
+ *	SYSCALL_VECTOR:
+ *		The vector through which a syscall makes the
+ *		user-to-kernel transition.
+ *
+ *	TIMER_IRQ:
+ *		The IRQ number the timer interrupt comes in at.
+ *
+ *	NR_IRQS:
+ *		The total number of interrupt vectors (including all the
+ *		architecture specific interrupts) needed.
+ *
+ */			
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR	0x20
+
+#define SYSCALL_VECTOR		0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ *  Some of the following vectors are 'rare'; they are merged
+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ *  TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR	0xff
+#define ERROR_APIC_VECTOR	0xfe
+#define INVALIDATE_TLB_VECTOR	0xfd
+#define RESCHEDULE_VECTOR	0xfc
+#define CALL_FUNCTION_VECTOR	0xfb
+
+#define THERMAL_APIC_VECTOR	0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR	0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR	0x31
+#define FIRST_SYSTEM_VECTOR	0xef
+
+#define TIMER_IRQ 0
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#include "irq_vectors_limits.h"
+
+#define FPU_IRQ			13
+
+#define	FIRST_VM86_IRQ		3
+#define LAST_VM86_IRQ		15
+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
+
+
+#endif /* _ASM_IRQ_VECTORS_H */
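The arithmetic behind the layout: 256 vectors minus the 32 CPU-reserved ones
(0x00-0x1f) leaves the 224 usable vectors mentioned above.  Device vectors
then step by 8 from 0x31 so consecutive allocations spread across priority
levels (a level is 16 vectors wide).  A sketch of the shape of the allocation
this implies; the real allocator lives in the io_apic code, this is only an
assumed outline:

	static int current_vector = FIRST_DEVICE_VECTOR;

	current_vector += 8;			/* hop to another priority level */
	if (current_vector == SYSCALL_VECTOR)
		current_vector += 8;		/* 0x80 is taken by syscalls */
	if (current_vector >= FIRST_SYSTEM_VECTOR)
		current_vector = -1;		/* out of device vectors */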
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
new file mode 100644
index 0000000..b330026
--- /dev/null
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IRQ_VECTORS_LIMITS_H
+#define _ASM_IRQ_VECTORS_LIMITS_H
+
+#ifdef CONFIG_PCI_MSI
+#define NR_IRQS FIRST_SYSTEM_VECTOR
+#define NR_IRQ_VECTORS NR_IRQS
+#else
+#ifdef CONFIG_X86_IO_APIC
+#define NR_IRQS 224
+# if (224 >= 32 * NR_CPUS)
+# define NR_IRQ_VECTORS NR_IRQS
+# else
+# define NR_IRQ_VECTORS (32 * NR_CPUS)
+# endif
+#else
+#define NR_IRQS 16
+#define NR_IRQ_VECTORS NR_IRQS
+#endif
+#endif
+
+#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h
new file mode 100644
index 0000000..627f1cd
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_apic.h
@@ -0,0 +1,133 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+#include <mach_apicdef.h>
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE	(APIC_DFR_FLAT)
+
+static inline cpumask_t target_cpus(void)
+{ 
+#ifdef CONFIG_SMP
+	return cpu_online_map;
+#else
+	return cpumask_of_cpu(0);
+#endif
+} 
+#define TARGET_CPUS (target_cpus())
+
+#define NO_BALANCE_IRQ (0)
+#define esr_disable (0)
+
+#define NO_IOAPIC_CHECK (0)
+
+#define INT_DELIVERY_MODE dest_LowestPrio
+#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
+
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void init_apic_ldr(void)
+{
+	unsigned long val;
+
+	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+	apic_write_around(APIC_LDR, val);
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	return phys_map;
+}
+
+static inline void clustered_apic_check(void)
+{
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+					"Flat", nr_ioapics);
+}
+
+static inline int multi_timer_check(int apic, int irq)
+{
+	return 0;
+}
+
+static inline int apicid_to_node(int logical_apicid)
+{
+	return 0;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	return 1 << cpu;
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < get_physical_broadcast())
+		return  mps_cpu;
+	else
+		return BAD_APICID;
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
+{
+	return physid_mask_of_physid(phys_apicid);
+}
+
+static inline int mpc_apic_id(struct mpc_config_processor *m, 
+			struct mpc_config_translation *translation_record)
+{
+	printk("Processor #%d %ld:%ld APIC version %d\n",
+			m->mpc_apicid,
+			(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+			(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+			m->mpc_apicver);
+	return (m->mpc_apicid);
+}
+
+static inline void setup_portio_remap(void)
+{
+}
+
+static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+static inline int apic_id_registered(void)
+{
+	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	return cpus_addr(cpumask)[0];
+}
+
+static inline void enable_apic_mode(void)
+{
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-default/mach_apicdef.h b/include/asm-i386/mach-default/mach_apicdef.h
new file mode 100644
index 0000000..7bcb350
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define		APIC_ID_MASK		(0xF<<24)
+
+static inline unsigned get_apic_id(unsigned long x) 
+{ 
+	return (((x)>>24)&0xF);
+} 
+
+#define		GET_APIC_ID(x)	get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
new file mode 100644
index 0000000..6f2b17a
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
+
+void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void __send_IPI_shortcut(unsigned int shortcut, int vector);
+
+static inline void send_IPI_mask(cpumask_t mask, int vector)
+{
+	send_IPI_mask_bitmask(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+	/*
+	 * if there are no other CPUs in the system then we get an APIC send 
+	 * error if we try to broadcast, thus avoid sending IPIs in this case.
+	 */
+	if (!(num_online_cpus() > 1))
+		return;
+
+	__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
+	return;
+}
+
+static inline void send_IPI_all(int vector)
+{
+	__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
+}
+
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-default/mach_mpparse.h b/include/asm-i386/mach-default/mach_mpparse.h
new file mode 100644
index 0000000..1d38324
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_mpparse.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_MACH_MPPARSE_H
+#define __ASM_MACH_MPPARSE_H
+
+static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
+				struct mpc_config_translation *translation)
+{
+//	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
+}
+
+static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
+				struct mpc_config_translation *translation)
+{
+}
+
+static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 
+		char *productid)
+{
+	return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	return 0;
+}
+
+
+#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-default/mach_mpspec.h b/include/asm-i386/mach-default/mach_mpspec.h
new file mode 100644
index 0000000..6b5dadc
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_mpspec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+#define MAX_MP_BUSSES 32
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
new file mode 100644
index 0000000..521e227
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/mach_reboot.h
+ *
+ *  Machine specific reboot functions for generic.
+ *  Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_REBOOT_H
+#define _MACH_REBOOT_H
+
+static inline void kb_wait(void)
+{
+	int i;
+
+	for (i = 0; i < 0x10000; i++)
+		if ((inb_p(0x64) & 0x02) == 0)
+			break;
+}
+
+static inline void mach_reboot(void)
+{
+	int i;
+	for (i = 0; i < 100; i++) {
+		kb_wait();
+		udelay(50);
+		outb(0xfe, 0x64);         /* pulse reset low */
+		udelay(50);
+	}
+}
+
+#endif /* !_MACH_REBOOT_H */
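For reference, kb_wait() polls bit 1 of the 8042 status port (input buffer
full), and 0xfe is the keyboard controller command that pulses the CPU reset
line.  The same handshake with the magic numbers named (names are ours, not
the kernel's):

	#define KBC_STATUS	0x64	/* 8042 status/command port */
	#define KBC_STAT_IBF	0x02	/* input buffer full */
	#define KBC_PULSE_RESET	0xfe	/* pulse the reset line low */

	while (inb_p(KBC_STATUS) & KBC_STAT_IBF)
		;			/* controller busy, wait */
	outb(KBC_PULSE_RESET, KBC_STATUS);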
diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h
new file mode 100644
index 0000000..b749aa4
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_time.h
@@ -0,0 +1,122 @@
+/*
+ *  include/asm-i386/mach-default/mach_time.h
+ *
+ *  Machine specific set RTC function for generic.
+ *  Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TIME_H
+#define _MACH_TIME_H
+
+#include <linux/mc146818rtc.h>
+
+/* timing window for calling set_rtc_mmss() around the 500 ms point */
+/* used in arch/i386/time.c::do_timer_interrupt() */
+#define USEC_AFTER	500000
+#define USEC_BEFORE	500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ *      sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mach_set_rtc_mmss(unsigned long nowtime)
+{
+	int retval = 0;
+	int real_seconds, real_minutes, cmos_minutes;
+	unsigned char save_control, save_freq_select;
+
+	save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+	cmos_minutes = CMOS_READ(RTC_MINUTES);
+	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		BCD_TO_BIN(cmos_minutes);
+
+	/*
+	 * since we're only adjusting minutes and seconds,
+	 * don't interfere with hour overflow. This avoids
+	 * messing with unknown time zones but requires your
+	 * RTC not to be off by more than 15 minutes
+	 */
+	real_seconds = nowtime % 60;
+	real_minutes = nowtime / 60;
+	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+		real_minutes += 30;		/* correct for half hour time zone */
+	real_minutes %= 60;
+
+	if (abs(real_minutes - cmos_minutes) < 30) {
+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+			BIN_TO_BCD(real_seconds);
+			BIN_TO_BCD(real_minutes);
+		}
+		CMOS_WRITE(real_seconds,RTC_SECONDS);
+		CMOS_WRITE(real_minutes,RTC_MINUTES);
+	} else {
+		printk(KERN_WARNING
+		       "set_rtc_mmss: can't update from %d to %d\n",
+		       cmos_minutes, real_minutes);
+		retval = -1;
+	}
+
+	/* The following flags have to be released exactly in this order,
+	 * otherwise the DS12887 (popular MC146818A clone with integrated
+	 * battery and quartz) will not reset the oscillator and will not
+	 * update precisely 500 ms later. You won't find this mentioned in
+	 * the Dallas Semiconductor data sheets, but who believes data
+	 * sheets anyway ...                           -- Markus Kuhn
+	 */
+	CMOS_WRITE(save_control, RTC_CONTROL);
+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+	return retval;
+}
+
+static inline unsigned long mach_get_cmos_time(void)
+{
+	unsigned int year, mon, day, hour, min, sec;
+	int i;
+
+	/* The Linux interpretation of the CMOS clock register contents:
+	 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+	 * RTC registers show the second which has precisely just started.
+	 * Let's hope other operating systems interpret the RTC the same way.
+	 */
+	/* read RTC exactly on falling edge of update flag */
+	for (i = 0 ; i < 1000000 ; i++)	/* may take up to 1 second... */
+		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+			break;
+	for (i = 0 ; i < 1000000 ; i++)	/* must try at least 2.228 ms */
+		if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+			break;
+	do { /* Isn't this overkill? The UIP check above should guarantee consistency. */
+		sec = CMOS_READ(RTC_SECONDS);
+		min = CMOS_READ(RTC_MINUTES);
+		hour = CMOS_READ(RTC_HOURS);
+		day = CMOS_READ(RTC_DAY_OF_MONTH);
+		mon = CMOS_READ(RTC_MONTH);
+		year = CMOS_READ(RTC_YEAR);
+	} while (sec != CMOS_READ(RTC_SECONDS));
+	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		BCD_TO_BIN(sec);
+		BCD_TO_BIN(min);
+		BCD_TO_BIN(hour);
+		BCD_TO_BIN(day);
+		BCD_TO_BIN(mon);
+		BCD_TO_BIN(year);
+	}
+	if ((year += 1900) < 1970)
+		year += 100;
+
+	return mktime(year, mon, day, hour, min, sec);
+}
+
+#endif /* !_MACH_TIME_H */
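The half-hour correction is easiest to see with numbers.  Take an RTC kept
in a zone offset by 30 minutes: cmos_minutes = 50, real_minutes = 22.  Then
abs(22 - 50) = 28, (28 + 15) / 30 = 1 is odd, so 30 is added and
real_minutes becomes 52; the difference from the CMOS value is now 2, well
inside the < 30 check, and only minutes and seconds are written, leaving the
hour (and hence the zone) alone.

	/* the rounding expression from mach_set_rtc_mmss(), worked example */
	int cmos_minutes = 50, real_minutes = 22;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* 52: back in the CMOS half-hour */
	real_minutes %= 60;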
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
new file mode 100644
index 0000000..4b9703b
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -0,0 +1,48 @@
+/*
+ *  include/asm-i386/mach-default/mach_timer.h
+ *
+ *  Machine specific calibrate_tsc() for generic.
+ *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC ------- 
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _MACH_TIMER_H
+#define _MACH_TIMER_H
+
+#define CALIBRATE_LATCH	(5 * LATCH)
+
+static inline void mach_prepare_counter(void)
+{
+	/* Set the Gate high, disable speaker */
+	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+	/*
+	 * Now let's take care of CTC channel 2
+	 *
+	 * Set the Gate high, program CTC channel 2 for mode 0,
+	 * (interrupt on terminal count mode), binary count,
+	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+	 *
+	 * Some devices need a delay here.
+	 */
+	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
+	outb_p(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
+	outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+	unsigned long count = 0;
+	do {
+		count++;
+	} while ((inb_p(0x61) & 0x20) == 0);
+	*count_p = count;
+}
+
+#endif /* !_MACH_TIMER_H */
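The intended calling pattern brackets a TSC read pair: prepare the counter,
read the TSC, spin in mach_countup() until channel 2's output bit (bit 5 of
port 0x61) goes high, read the TSC again.  Since the countdown takes exactly
CALIBRATE_LATCH PIT ticks, i.e. CALIBRATE_LATCH / CLOCK_TICK_RATE seconds,
the TSC rate falls out of the cycle delta.  A sketch of that pattern:

	unsigned long count;
	unsigned long long start, end;

	mach_prepare_counter();
	rdtscll(start);
	mach_countup(&count);	/* returns once the terminal count is reached */
	rdtscll(end);
	/* (end - start) TSC cycles elapsed in 5*LATCH/CLOCK_TICK_RATE seconds */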
diff --git a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h
new file mode 100644
index 0000000..625438b
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_traps.h
@@ -0,0 +1,41 @@
+/*
+ *  include/asm-i386/mach-default/mach_traps.h
+ *
+ *  Machine specific NMI handling for generic.
+ *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TRAPS_H
+#define _MACH_TRAPS_H
+
+#include <asm/mc146818rtc.h>
+
+static inline void clear_mem_error(unsigned char reason)
+{
+	reason = (reason & 0xf) | 4;
+	outb(reason, 0x61);
+}
+
+static inline unsigned char get_nmi_reason(void)
+{
+	return inb(0x61);
+}
+
+static inline void reassert_nmi(void)
+{
+	int old_reg = -1;
+
+	if (do_i_have_lock_cmos())
+		old_reg = current_lock_cmos_reg();
+	else
+		lock_cmos(0); /* register doesn't matter here */
+	outb(0x8f, 0x70);
+	inb(0x71);		/* dummy */
+	outb(0x0f, 0x70);
+	inb(0x71);		/* dummy */
+	if (old_reg >= 0)
+		outb(old_reg, 0x70);
+	else
+		unlock_cmos();
+}
+
+#endif /* !_MACH_TRAPS_H */
diff --git a/include/asm-i386/mach-default/mach_wakecpu.h b/include/asm-i386/mach-default/mach_wakecpu.h
new file mode 100644
index 0000000..673b85c
--- /dev/null
+++ b/include/asm-i386/mach-default/mach_wakecpu.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_MACH_WAKECPU_H
+#define __ASM_MACH_WAKECPU_H
+
+/* 
+ * This file copes with machines that wake up secondary CPUs by the
+ * INIT, INIT, STARTUP sequence.
+ */
+
+#define WAKE_SECONDARY_VIA_INIT
+
+#define TRAMPOLINE_LOW phys_to_virt(0x467)
+#define TRAMPOLINE_HIGH phys_to_virt(0x469)
+
+#define boot_cpu_apicid boot_cpu_physical_apicid
+
+static inline void wait_for_init_deassert(atomic_t *deassert)
+{
+	while (!atomic_read(deassert));
+	return;
+}
+
+/* Nothing to do for most platforms, since cleared by the INIT cycle */
+static inline void smp_callin_clear_local_apic(void)
+{
+}
+
+static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
+{
+}
+
+static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
+{
+}
+
+#if APIC_DEBUG
+ #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
+#else
+ #define inquire_remote_apic(apicid) {}
+#endif
+
+#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-default/pci-functions.h b/include/asm-i386/mach-default/pci-functions.h
new file mode 100644
index 0000000..ed0bab4
--- /dev/null
+++ b/include/asm-i386/mach-default/pci-functions.h
@@ -0,0 +1,19 @@
+/*
+ *	PCI BIOS function numbering for conventional PCI BIOS 
+ *	systems
+ */
+
+#define PCIBIOS_PCI_FUNCTION_ID 	0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT 	0xb101
+#define PCIBIOS_FIND_PCI_DEVICE		0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE	0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE	0xb106
+#define PCIBIOS_READ_CONFIG_BYTE	0xb108
+#define PCIBIOS_READ_CONFIG_WORD	0xb109
+#define PCIBIOS_READ_CONFIG_DWORD	0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE	0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD	0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD	0xb10d
+#define PCIBIOS_GET_ROUTING_OPTIONS	0xb10e
+#define PCIBIOS_SET_PCI_HW_INT		0xb10f
+
diff --git a/include/asm-i386/mach-default/setup_arch_post.h b/include/asm-i386/mach-default/setup_arch_post.h
new file mode 100644
index 0000000..2fc4888
--- /dev/null
+++ b/include/asm-i386/mach-default/setup_arch_post.h
@@ -0,0 +1,40 @@
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ *	This is included late in kernel/setup.c so that it can make
+ *	use of all of the static functions.
+ **/
+
+static char * __init machine_specific_memory_setup(void)
+{
+	char *who;
+
+	who = "BIOS-e820";
+
+	/*
+	 * Try to copy the BIOS-supplied E820-map.
+	 *
+	 * Otherwise fake a memory map; one section from 0k->640k,
+	 * the next section from 1mb->appropriate_mem_k
+	 */
+	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+		unsigned long mem_size;
+
+		/* compare results from other methods and take the greater */
+		if (ALT_MEM_K < EXT_MEM_K) {
+			mem_size = EXT_MEM_K;
+			who = "BIOS-88";
+		} else {
+			mem_size = ALT_MEM_K;
+			who = "BIOS-e801";
+		}
+
+		e820.nr_map = 0;
+		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+  	}
+	return who;
+}
diff --git a/include/asm-i386/mach-default/setup_arch_pre.h b/include/asm-i386/mach-default/setup_arch_pre.h
new file mode 100644
index 0000000..fb42099e
--- /dev/null
+++ b/include/asm-i386/mach-default/setup_arch_pre.h
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+/* no action for generic */
+
+#define ARCH_SETUP
diff --git a/include/asm-i386/mach-default/smpboot_hooks.h b/include/asm-i386/mach-default/smpboot_hooks.h
new file mode 100644
index 0000000..7f45f63
--- /dev/null
+++ b/include/asm-i386/mach-default/smpboot_hooks.h
@@ -0,0 +1,44 @@
+/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
+ * which needs to alter them. */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+	io_apic_irqs = 0;
+}
+
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+	CMOS_WRITE(0xa, 0xf);
+	local_flush_tlb();
+	Dprintk("1.\n");
+	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+	Dprintk("2.\n");
+	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+	Dprintk("3.\n");
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+	/*
+	 * Install writable page 0 entry to set BIOS data area.
+	 */
+	local_flush_tlb();
+
+	/*
+	 * Paranoid:  Set warm reset code and vector here back
+	 * to default values.
+	 */
+	CMOS_WRITE(0, 0xf);
+
+	*((volatile long *) phys_to_virt(0x467)) = 0;
+}
+
+static inline void smpboot_setup_io_apic(void)
+{
+	/*
+	 * Here we can be sure that there is an IO-APIC in the system. Let's
+	 * go and set it up:
+	 */
+	if (!skip_ioapic_setup && nr_ioapics)
+		setup_IO_APIC();
+}
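The warm reset vector is a real-mode segment:offset pair: start_eip >> 4
goes to 0x469 (the segment) and start_eip & 0xf to 0x467 (the offset), and
CMOS register 0xf is set to 0xa so the BIOS takes the jump-via-40:67 path
after the INIT.  Worked example with an assumed trampoline address:

	unsigned long start_eip = 0x9A000;	/* assumed, not fixed by this header */
	unsigned short seg = start_eip >> 4;	/* 0x9A00, stored at 0x469 */
	unsigned short off = start_eip & 0xf;	/* 0x0, stored at 0x467 */
	/* the woken CPU resumes at 0x9A00:0x0000 == physical 0x9A000 */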
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
new file mode 100644
index 0000000..ceab2c4
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -0,0 +1,207 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+extern u8 bios_cpu_apicid[];
+
+#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
+#define esr_disable (1)
+
+static inline int apic_id_registered(void)
+{
+	return 1;
+}
+
+static inline cpumask_t target_cpus(void)
+{ 
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+	return CPU_MASK_ALL;
+#else
+	return cpumask_of_cpu(smp_processor_id());
+#endif
+}
+#define TARGET_CPUS	(target_cpus())
+
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+#define APIC_DFR_VALUE		(APIC_DFR_CLUSTER)
+#define INT_DELIVERY_MODE	(dest_LowestPrio)
+#define INT_DEST_MODE		(1)    /* logical delivery broadcast to all procs */
+#define NO_BALANCE_IRQ 		(1)
+#undef  WAKE_SECONDARY_VIA_INIT
+#define WAKE_SECONDARY_VIA_MIP
+#else
+#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
+#define INT_DELIVERY_MODE	(dest_Fixed)
+#define INT_DEST_MODE		(0)    /* phys delivery to target procs */
+#define NO_BALANCE_IRQ 		(0)
+#undef  APIC_DEST_LOGICAL
+#define APIC_DEST_LOGICAL	0x0
+#define WAKE_SECONDARY_VIA_INIT
+#endif
+
+#define NO_IOAPIC_CHECK (1)
+
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{ 
+	return 0;
+} 
+static inline unsigned long check_apicid_present(int bit) 
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
+#define apicid_cluster(apicid) (apicid & 0xF0)
+
+static inline unsigned long calculate_ldr(int cpu)
+{
+	unsigned long id;
+	id = xapic_phys_to_log_apicid(cpu);
+	return (SET_APIC_LOGICAL_ID(id));
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void init_apic_ldr(void)
+{
+	unsigned long val;
+	int cpu = smp_processor_id();
+
+	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	val = calculate_ldr(cpu);
+	apic_write_around(APIC_LDR, val);
+}
+
+extern void es7000_sw_apic(void);
+static inline void enable_apic_mode(void)
+{
+	es7000_sw_apic();
+	return;
+}
+
+extern int apic_version [MAX_APICS];
+static inline void clustered_apic_check(void)
+{
+	int apic = bios_cpu_apicid[smp_processor_id()];
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
+		(apic_version[apic] == 0x14) ? 
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
+}
+
+static inline int multi_timer_check(int apic, int irq)
+{
+	return 0;
+}
+
+static inline int apicid_to_node(int logical_apicid)
+{
+	return 0;
+}
+
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (!mps_cpu)
+		return boot_cpu_physical_apicid;
+	else if (mps_cpu < NR_CPUS)
+		return (int) bios_cpu_apicid[mps_cpu];
+	else
+		return BAD_APICID;
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
+{
+	static int id = 0;
+	physid_mask_t mask;
+	mask = physid_mask_of_physid(id);
+	++id;
+	return mask;
+}
+
+extern u8 cpu_2_logical_apicid[];
+/* Mapping from cpu number to logical apicid */
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	if (cpu >= NR_CPUS)
+		return BAD_APICID;
+	return (int)cpu_2_logical_apicid[cpu];
+}
+
+static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
+{
+	printk("Processor #%d %ld:%ld APIC version %d\n",
+	        m->mpc_apicid,
+	        (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+	        (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+	        m->mpc_apicver);
+	return (m->mpc_apicid);
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	/* For clustered we don't have a good way to do this yet - hack */
+	return physids_promote(0xff);
+}
+
+
+static inline void setup_portio_remap(void)
+{
+}
+
+extern unsigned int boot_cpu_physical_apicid;
+static inline int check_phys_apicid_present(int cpu_physical_apicid)
+{
+	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+	return (1);
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid;	
+
+	num_bits_set = cpus_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == NR_CPUS)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+		return 0xFF;
+#else
+		return cpu_to_logical_apicid(0);
+#endif
+	/* 
+	 * The cpus in the mask must all be in the same APIC cluster.  If they
+	 * are not, return the default value of TARGET_CPUS.
+	 */
+	cpu = first_cpu(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpu_isset(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) != 
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n",__FUNCTION__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+				return 0xFF;
+#else
+				return cpu_to_logical_apicid(0);
+#endif
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-es7000/mach_apicdef.h b/include/asm-i386/mach-es7000/mach_apicdef.h
new file mode 100644
index 0000000..a58ab5a
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define		APIC_ID_MASK		(0xFF<<24)
+
+static inline unsigned get_apic_id(unsigned long x) 
+{ 
+	return (((x)>>24)&0xFF);
+} 
+
+#define		GET_APIC_ID(x)	get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-es7000/mach_ipi.h b/include/asm-i386/mach-es7000/mach_ipi.h
new file mode 100644
index 0000000..5e61bd2
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_ipi.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector);
+
+static inline void send_IPI_mask(cpumask_t mask, int vector)
+{
+	send_IPI_mask_sequence(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
+	if (!cpus_empty(mask))
+		send_IPI_mask(mask, vector);
+}
+
+static inline void send_IPI_all(int vector)
+{
+	send_IPI_mask(cpu_online_map, vector);
+}
+
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
new file mode 100644
index 0000000..85809e0
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_MACH_MPPARSE_H
+#define __ASM_MACH_MPPARSE_H
+
+static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
+				struct mpc_config_translation *translation)
+{
+	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
+}
+
+static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
+				struct mpc_config_translation *translation)
+{
+}
+
+extern int parse_unisys_oem (char *oemptr, int oem_entries);
+extern int find_unisys_acpi_oem_table(unsigned long *oem_addr, int *length);
+
+static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
+		char *productid)
+{
+	if (mpc->mpc_oemptr) {
+		struct mp_config_oemtable *oem_table = 
+			(struct mp_config_oemtable *)mpc->mpc_oemptr;
+		if (!strncmp(oem, "UNISYS", 6))
+			return parse_unisys_oem((char *)oem_table, oem_table->oem_length);
+	}
+	return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	unsigned long oem_addr; 
+	int oem_entries;
+	if (!find_unisys_acpi_oem_table(&oem_addr, &oem_entries))
+		return parse_unisys_oem((char *)oem_addr, oem_entries);
+	return 0;
+}
+
+
+#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-es7000/mach_mpspec.h b/include/asm-i386/mach-es7000/mach_mpspec.h
new file mode 100644
index 0000000..b1f5039
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_mpspec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+#define MAX_MP_BUSSES 256
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-es7000/mach_wakecpu.h b/include/asm-i386/mach-es7000/mach_wakecpu.h
new file mode 100644
index 0000000..efc903b
--- /dev/null
+++ b/include/asm-i386/mach-es7000/mach_wakecpu.h
@@ -0,0 +1,58 @@
+#ifndef __ASM_MACH_WAKECPU_H
+#define __ASM_MACH_WAKECPU_H
+
+/* 
+ * This file copes with machines that wake up secondary CPUs by the
+ * INIT, INIT, STARTUP sequence.
+ */
+
+#ifdef CONFIG_ES7000_CLUSTERED_APIC
+#define WAKE_SECONDARY_VIA_MIP
+#else
+#define WAKE_SECONDARY_VIA_INIT
+#endif
+
+#ifdef WAKE_SECONDARY_VIA_MIP
+extern int es7000_start_cpu(int cpu, unsigned long eip);
+static inline int
+wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+{
+	int boot_error = 0;
+	boot_error = es7000_start_cpu(phys_apicid, start_eip);
+	return boot_error;
+}
+#endif
+
+#define TRAMPOLINE_LOW phys_to_virt(0x467)
+#define TRAMPOLINE_HIGH phys_to_virt(0x469)
+
+#define boot_cpu_apicid boot_cpu_physical_apicid
+
+static inline void wait_for_init_deassert(atomic_t *deassert)
+{
+#ifdef WAKE_SECONDARY_VIA_INIT
+	while (!atomic_read(deassert));
+#endif
+	return;
+}
+
+/* Nothing to do for most platforms, since cleared by the INIT cycle */
+static inline void smp_callin_clear_local_apic(void)
+{
+}
+
+static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
+{
+}
+
+static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
+{
+}
+
+#if APIC_DEBUG
+ #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
+#else
+ #define inquire_remote_apic(apicid) {}
+#endif
+
+#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-generic/irq_vectors_limits.h b/include/asm-i386/mach-generic/irq_vectors_limits.h
new file mode 100644
index 0000000..890ce3f
--- /dev/null
+++ b/include/asm-i386/mach-generic/irq_vectors_limits.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_IRQ_VECTORS_LIMITS_H
+#define _ASM_IRQ_VECTORS_LIMITS_H
+
+/*
+ * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
+ * even with uni-proc kernels, so use a big array.
+ *
+ * This value should be the same in both the generic and summit subarches.
+ * Change one, change 'em both.
+ */
+#define NR_IRQS	224
+#define NR_IRQ_VECTORS	1024
+
+#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h
new file mode 100644
index 0000000..ab36d02
--- /dev/null
+++ b/include/asm-i386/mach-generic/mach_apic.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+#include <asm/genapic.h>
+
+#define esr_disable (genapic->ESR_DISABLE)
+#define NO_BALANCE_IRQ (genapic->no_balance_irq)
+#define NO_IOAPIC_CHECK	(genapic->no_ioapic_check)
+#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
+#define INT_DEST_MODE (genapic->int_dest_mode)
+#undef APIC_DEST_LOGICAL
+#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
+#define TARGET_CPUS	  (genapic->target_cpus())
+#define apic_id_registered (genapic->apic_id_registered)
+#define init_apic_ldr (genapic->init_apic_ldr)
+#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
+#define clustered_apic_check (genapic->clustered_apic_check) 
+#define multi_timer_check (genapic->multi_timer_check)
+#define apicid_to_node (genapic->apicid_to_node)
+#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) 
+#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
+#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
+#define mpc_apic_id (genapic->mpc_apic_id) 
+#define setup_portio_remap (genapic->setup_portio_remap)
+#define check_apicid_present (genapic->check_apicid_present)
+#define check_phys_apicid_present (genapic->check_phys_apicid_present)
+#define check_apicid_used (genapic->check_apicid_used)
+#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define enable_apic_mode (genapic->enable_apic_mode)
+#define phys_pkg_id (genapic->phys_pkg_id)
+
+#endif /* __ASM_MACH_APIC_H */
diff --git a/include/asm-i386/mach-generic/mach_apicdef.h b/include/asm-i386/mach-generic/mach_apicdef.h
new file mode 100644
index 0000000..28ed989
--- /dev/null
+++ b/include/asm-i386/mach-generic/mach_apicdef.h
@@ -0,0 +1,11 @@
+#ifndef _GENAPIC_MACH_APICDEF_H
+#define _GENAPIC_MACH_APICDEF_H 1
+
+#ifndef APIC_DEFINITION
+#include <asm/genapic.h>
+
+#define GET_APIC_ID (genapic->get_apic_id)
+#define APIC_ID_MASK (genapic->apic_id_mask)
+#endif
+
+#endif
diff --git a/include/asm-i386/mach-generic/mach_ipi.h b/include/asm-i386/mach-generic/mach_ipi.h
new file mode 100644
index 0000000..441b0fe
--- /dev/null
+++ b/include/asm-i386/mach-generic/mach_ipi.h
@@ -0,0 +1,10 @@
+#ifndef _MACH_IPI_H
+#define _MACH_IPI_H 1
+
+#include <asm/genapic.h>
+
+#define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_allbutself (genapic->send_IPI_allbutself)
+#define send_IPI_all (genapic->send_IPI_all)
+
+#endif
diff --git a/include/asm-i386/mach-generic/mach_mpparse.h b/include/asm-i386/mach-generic/mach_mpparse.h
new file mode 100644
index 0000000..dbd9fce
--- /dev/null
+++ b/include/asm-i386/mach-generic/mach_mpparse.h
@@ -0,0 +1,12 @@
+#ifndef _MACH_MPPARSE_H
+#define _MACH_MPPARSE_H 1
+
+#include <asm/genapic.h>
+
+#define mpc_oem_bus_info (genapic->mpc_oem_bus_info)
+#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus)
+
+int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); 
+int acpi_madt_oem_check(char *oem_id, char *oem_table_id); 
+
+#endif
diff --git a/include/asm-i386/mach-generic/mach_mpspec.h b/include/asm-i386/mach-generic/mach_mpspec.h
new file mode 100644
index 0000000..9ef0b94
--- /dev/null
+++ b/include/asm-i386/mach-generic/mach_mpspec.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
+/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
+#define MAX_MP_BUSSES 260
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h
new file mode 100644
index 0000000..e1a0449
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_apic.h
@@ -0,0 +1,151 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+#include <asm/io.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+
+#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
+
+static inline cpumask_t target_cpus(void)
+{
+	return CPU_MASK_ALL;
+}
+
+#define TARGET_CPUS (target_cpus())
+
+#define NO_BALANCE_IRQ (1)
+#define esr_disable (1)
+
+#define NO_IOAPIC_CHECK (0)
+
+#define INT_DELIVERY_MODE dest_LowestPrio
+#define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
+ 
+#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap)
+#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map)
+#define apicid_cluster(apicid) (apicid & 0xF0)
+
+static inline int apic_id_registered(void)
+{
+	return 1;
+}
+
+static inline void init_apic_ldr(void)
+{
+	/* Already done in NUMA-Q firmware */
+}
+
+static inline void clustered_apic_check(void)
+{
+	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
+		"NUMA-Q", nr_ioapics);
+}
+
+/*
+ * Skip adding the timer int on secondary nodes, which causes
+ * a small but painful rift in the time-space continuum.
+ */
+static inline int multi_timer_check(int apic, int irq)
+{
+	return apic != 0 && irq == 0;
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
+{
+	/* We don't have a good way to do this yet - hack */
+	return physids_promote(0xFUL);
+}
+
+/* Mapping from cpu number to logical apicid */
+extern u8 cpu_2_logical_apicid[];
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	if (cpu >= NR_CPUS)
+		return BAD_APICID;
+	return (int)cpu_2_logical_apicid[cpu];
+}
+
+/*
+ * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
+ * cpu to APIC ID relation to properly interact with the intelligent
+ * mode of the cluster controller.
+ */
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < 60)
+		return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+	else
+		return BAD_APICID;
+}
+
+static inline int generate_logical_apicid(int quad, int phys_apicid)
+{
+	return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
+}
+
+static inline int apicid_to_node(int logical_apicid) 
+{
+	return logical_apicid >> 4;
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
+{
+	int node = apicid_to_node(logical_apicid);
+	int cpu = __ffs(logical_apicid & 0xf);
+
+	return physid_mask_of_physid(cpu + 4*node);
+}
+
+static inline int mpc_apic_id(struct mpc_config_processor *m, 
+			struct mpc_config_translation *translation_record)
+{
+	int quad = translation_record->trans_quad;
+	int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
+
+	printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n",
+			m->mpc_apicid,
+			(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+			(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+			m->mpc_apicver, quad, logical_apicid);
+	return logical_apicid;
+}
+
+static inline void setup_portio_remap(void)
+{
+	int num_quads = num_online_nodes();
+
+	if (num_quads <= 1)
+       		return;
+
+	printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
+	xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
+	printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
+		(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+}
+
+static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return (1);
+}
+
+static inline void enable_apic_mode(void)
+{
+}
+
+/*
+ * We use physical apicids here, not logical, so just return the default
+ * physical broadcast to stop people from breaking us
+ */
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	return (int) 0xF;
+}
+
+/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
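The NUMA-Q encoding packs the quad number into the high nibble and a one-hot
CPU bit into the low nibble, which is why apicid_to_node() is a plain shift.
Worked example for mps_cpu = 5:

	int mps_cpu = 5;
	int apicid = ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));	/* 0x12 */
	int node = apicid >> 4;						/* quad 1 */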
diff --git a/include/asm-i386/mach-numaq/mach_apicdef.h b/include/asm-i386/mach-numaq/mach_apicdef.h
new file mode 100644
index 0000000..bf439d0
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_apicdef.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+
+#define APIC_ID_MASK (0xF<<24)
+
+static inline unsigned get_apic_id(unsigned long x)
+{
+	return (((x)>>24)&0x0F);
+}
+
+#define         GET_APIC_ID(x)  get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h
new file mode 100644
index 0000000..1b46fd3
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_ipi.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector);
+
+static inline void send_IPI_mask(cpumask_t mask, int vector)
+{
+	send_IPI_mask_sequence(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
+
+	if (!cpus_empty(mask))
+		send_IPI_mask(mask, vector);
+}
+
+static inline void send_IPI_all(int vector)
+{
+	send_IPI_mask(cpu_online_map, vector);
+}
+
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-numaq/mach_mpparse.h b/include/asm-i386/mach-numaq/mach_mpparse.h
new file mode 100644
index 0000000..51bbac8
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_mpparse.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_MACH_MPPARSE_H
+#define __ASM_MACH_MPPARSE_H
+
+static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
+				struct mpc_config_translation *translation)
+{
+	int quad = translation->trans_quad;
+	int local = translation->trans_local;
+
+	mp_bus_id_to_node[m->mpc_busid] = quad;
+	mp_bus_id_to_local[m->mpc_busid] = local;
+	printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
+}
+
+static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
+				struct mpc_config_translation *translation)
+{
+	int quad = translation->trans_quad;
+	int local = translation->trans_local;
+
+	quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+}
+
+#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-numaq/mach_mpspec.h b/include/asm-i386/mach-numaq/mach_mpspec.h
new file mode 100644
index 0000000..dffb098
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_mpspec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 512
+
+#define MAX_MP_BUSSES 32
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-numaq/mach_wakecpu.h b/include/asm-i386/mach-numaq/mach_wakecpu.h
new file mode 100644
index 0000000..0053004
--- /dev/null
+++ b/include/asm-i386/mach-numaq/mach_wakecpu.h
@@ -0,0 +1,43 @@
+#ifndef __ASM_MACH_WAKECPU_H
+#define __ASM_MACH_WAKECPU_H
+
+/* This file copes with machines that wakeup secondary CPUs by NMIs */
+
+#define WAKE_SECONDARY_VIA_NMI
+
+#define TRAMPOLINE_LOW phys_to_virt(0x8)
+#define TRAMPOLINE_HIGH phys_to_virt(0xa)
+
+#define boot_cpu_apicid boot_cpu_logical_apicid
+
+/* We don't do anything here because we use NMI's to boot instead */
+static inline void wait_for_init_deassert(atomic_t *deassert)
+{
+}
+
+/*
+ * Because we use NMIs rather than the INIT-STARTUP sequence to
+ * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
+ */
+static inline void smp_callin_clear_local_apic(void)
+{
+	clear_local_APIC();
+}
+
+static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
+{
+	printk("Storing NMI vector\n");
+	*high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
+	*low = *((volatile unsigned short *) TRAMPOLINE_LOW);
+}
+
+static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
+{
+	printk("Restoring NMI vector\n");
+	*((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
+	*((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
+}
+
+#define inquire_remote_apic(apicid) {}
+
+#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/include/asm-i386/mach-summit/irq_vectors_limits.h b/include/asm-i386/mach-summit/irq_vectors_limits.h
new file mode 100644
index 0000000..890ce3f
--- /dev/null
+++ b/include/asm-i386/mach-summit/irq_vectors_limits.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_IRQ_VECTORS_LIMITS_H
+#define _ASM_IRQ_VECTORS_LIMITS_H
+
+/*
+ * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
+ * even with uni-proc kernels, so use a big array.
+ *
+ * This value should be the same in both the generic and summit subarches.
+ * Change one, change 'em both.
+ */
+#define NR_IRQS	224
+#define NR_IRQ_VECTORS	1024
+
+#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
new file mode 100644
index 0000000..74e9cbc
--- /dev/null
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -0,0 +1,189 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+#include <linux/config.h>
+#include <asm/smp.h>
+
+#define esr_disable (1)
+#define NO_BALANCE_IRQ (0)
+
+#define NO_IOAPIC_CHECK (1)	/* Don't check I/O APIC ID for xAPIC */
+
+/* In clustered mode, the high nibble of APIC ID is a cluster number.
+ * The low nibble is a 4-bit bitmap. */
+#define XAPIC_DEST_CPUS_SHIFT	4
+#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
+
+#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
+
+static inline cpumask_t target_cpus(void)
+{
+	/* CPU_MASK_ALL (0xff) has undefined behaviour with
+	 * dest_LowestPrio mode logical clustered apic interrupt routing.
+	 * Just start on cpu 0; IRQ balancing will spread the load.
+	 */
+	return cpumask_of_cpu(0);
+} 
+#define TARGET_CPUS	(target_cpus())
+
+#define INT_DELIVERY_MODE (dest_LowestPrio)
+#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
+
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return 0;
+} 
+
+/* we don't use the phys_cpu_present_map to indicate apicid presence */
+static inline unsigned long check_apicid_present(int bit) 
+{
+	return 1;
+}
+
+#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
+
+extern u8 bios_cpu_apicid[];
+extern u8 cpu_2_logical_apicid[];
+
+static inline void init_apic_ldr(void)
+{
+	unsigned long val, id;
+	int i, count;
+	u8 lid;
+	u8 my_id = (u8)hard_smp_processor_id();
+	u8 my_cluster = (u8)apicid_cluster(my_id);
+
+	/* Create logical APIC IDs by counting CPUs already in cluster. */
+	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+		lid = cpu_2_logical_apicid[i];
+		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
+			++count;
+	}
+	/* We only have a 4-bit-wide bitmap in cluster mode.  If a deranged
+	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
+	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
+	id = my_cluster | (1UL << count);
+	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val |= SET_APIC_LOGICAL_ID(id);
+	apic_write_around(APIC_LDR, val);
+}
+
+static inline int multi_timer_check(int apic, int irq)
+{
+	return 0;
+}
+
+static inline int apic_id_registered(void)
+{
+	return 1;
+}
+
+static inline void clustered_apic_check(void)
+{
+	printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
+						nr_ioapics);
+}
+
+static inline int apicid_to_node(int logical_apicid)
+{
+	return logical_apicid >> 5;          /* 2 clusterids per CEC */
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	if (cpu >= NR_CPUS)
+		return BAD_APICID;
+	return (int)cpu_2_logical_apicid[cpu];
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < NR_CPUS)
+		return (int)bios_cpu_apicid[mps_cpu];
+	else
+		return BAD_APICID;
+}
+
+static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
+{
+	/* For clustered we don't have a good way to do this yet - hack */
+	return physids_promote(0x0F);
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int apicid)
+{
+	return physid_mask_of_physid(0);
+}
+
+static inline int mpc_apic_id(struct mpc_config_processor *m, 
+			struct mpc_config_translation *translation_record)
+{
+	printk("Processor #%d %ld:%ld APIC version %d\n",
+			m->mpc_apicid,
+			(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
+			(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
+			m->mpc_apicver);
+	return (m->mpc_apicid);
+}
+
+static inline void setup_portio_remap(void)
+{
+}
+
+static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return 1;
+}
+
+static inline void enable_apic_mode(void)
+{
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid;	
+
+	num_bits_set = cpus_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == NR_CPUS)
+		return (int) 0xFF;
+	/*
+	 * The cpus in the mask must all be in the same APIC cluster.  If
+	 * they are not, return the default value of TARGET_CPUS.
+	 */
+	cpu = first_cpu(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpu_isset(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) != 
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n",__FUNCTION__);
+				return 0xFF;
+			}
+			apicid = apicid | new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
+/* cpuid returns the value latched in the HW at reset, not the APIC ID
+ * register's value.  For any box whose BIOS changes APIC IDs, like
+ * clustered APIC systems, we must use hard_smp_processor_id.
+ *
+ * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
+ */
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return hard_smp_processor_id() >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
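
To see the clustered encoding concretely: the logical APIC ID built by
init_apic_ldr() puts the cluster number in bits 7:4 and a one-hot CPU bit
in bits 3:0.  A small sketch using the header's own masks (the ID values
are hypothetical):

	unsigned int id = 0x32;	/* hypothetical: cluster 3, CPU bit 1 */
	unsigned int cluster = apicid_cluster(id);		/* 0x30 */
	unsigned int cpubits = id & XAPIC_DEST_CPUS_MASK;	/* 0x02 */

	/* cpu_mask_to_apicid() just ORs IDs from the same cluster, so
	 * CPUs with logical IDs 0x31 and 0x32 yield destination 0x33. */
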
diff --git a/include/asm-i386/mach-summit/mach_apicdef.h b/include/asm-i386/mach-summit/mach_apicdef.h
new file mode 100644
index 0000000..a58ab5a
--- /dev/null
+++ b/include/asm-i386/mach-summit/mach_apicdef.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define		APIC_ID_MASK		(0xFF<<24)
+
+static inline unsigned get_apic_id(unsigned long x) 
+{ 
+	return (((x)>>24)&0xFF);
+} 
+
+#define		GET_APIC_ID(x)	get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-summit/mach_ipi.h b/include/asm-i386/mach-summit/mach_ipi.h
new file mode 100644
index 0000000..9404c53
--- /dev/null
+++ b/include/asm-i386/mach-summit/mach_ipi.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_MACH_IPI_H
+#define __ASM_MACH_IPI_H
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector);
+
+static inline void send_IPI_mask(cpumask_t mask, int vector)
+{
+	send_IPI_mask_sequence(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
+
+	if (!cpus_empty(mask))
+		send_IPI_mask(mask, vector);
+}
+
+static inline void send_IPI_all(int vector)
+{
+	send_IPI_mask(cpu_online_map, vector);
+}
+
+#endif /* __ASM_MACH_IPI_H */
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
new file mode 100644
index 0000000..2b9e6d5
--- /dev/null
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -0,0 +1,121 @@
+#ifndef __ASM_MACH_MPPARSE_H
+#define __ASM_MACH_MPPARSE_H
+
+#include <mach_apic.h>
+
+extern int use_cyclone;
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
+extern void setup_summit(void);
+#else
+#define setup_summit()	{}
+#endif
+
+static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 
+				struct mpc_config_translation *translation)
+{
+	Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
+}
+
+static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 
+				struct mpc_config_translation *translation)
+{
+}
+
+extern int usb_early_handoff;
+static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 
+		char *productid)
+{
+	if (!strncmp(oem, "IBM ENSW", 8) && 
+			(!strncmp(productid, "VIGIL SMP", 9) 
+			 || !strncmp(productid, "EXA", 3)
+			 || !strncmp(productid, "RUTHLESS SMP", 12))){
+		use_cyclone = 1; /*enable cyclone-timer*/
+		setup_summit();
+		usb_early_handoff = 1;
+		return 1;
+	}
+	return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+	if (!strncmp(oem_id, "IBM", 3) &&
+	    (!strncmp(oem_table_id, "SERVIGIL", 8)
+	     || !strncmp(oem_table_id, "EXA", 3))){
+		use_cyclone = 1; /*enable cyclone-timer*/
+		setup_summit();
+		usb_early_handoff = 1;
+		return 1;
+	}
+	return 0;
+}
+
+struct rio_table_hdr {
+	unsigned char version;      /* Version number of this data structure           */
+	                            /* Version 3 adds chassis_num & WP_index           */
+	unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)   */
+	unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)   */
+} __attribute__((packed));
+
+struct scal_detail {
+	unsigned char node_id;      /* Scalability Node ID                             */
+	unsigned long CBAR;         /* Address of 1MB register space                   */
+	unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+	unsigned char port1node;    /* Node ID port connected to: 0xFF = None          */
+	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+	unsigned char port2node;    /* Node ID port connected to: 0xFF = None          */
+	unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+	unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node)          */
+} __attribute__((packed));
+
+struct rio_detail {
+	unsigned char node_id;      /* RIO Node ID                                     */
+	unsigned long BBAR;         /* Address of 1MB register space                   */
+	unsigned char type;         /* Type of device                                  */
+	unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
+	                            /* For CYC:  Node ID of Twister that owns this CYC */
+	unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+	unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+	unsigned char port1node;    /* Node ID port connected to: 0xFF=None            */
+	unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+	unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG    */
+	                            /* For CYC:  0                                     */
+	unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used         */
+	                            /*                 = 0 : the XAPIC is not used, ie:*/
+	                            /*                     ints fwded to another XAPIC */
+	                            /*           Bits1:7 Reserved                      */
+	                            /* For CYC:  Bits0:7 Reserved                      */
+	unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
+	                            /*           lower slot numbers/PCI bus numbers    */
+	                            /* For CYC:  No meaning                            */
+	unsigned char chassis_num;  /* 1 based Chassis number                          */
+	                            /* For LookOut WPEGs this field indicates the      */
+	                            /* Expansion Chassis #, enumerated from Boot       */
+	                            /* Node WPEG external port, then Boot Node CYC     */
+	                            /* external port, then Next Vigil chassis WPEG     */
+	                            /* external port, etc.                             */
+	                            /* Shared Lookouts have only 1 chassis number (the */
+	                            /* first one assigned)                             */
+} __attribute__((packed));
+
+
+typedef enum {
+	CompatTwister = 0,  /* Compatibility Twister               */
+	AltTwister    = 1,  /* Alternate Twister of internal 8-way */
+	CompatCyclone = 2,  /* Compatibility Cyclone               */
+	AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
+	CompatWPEG    = 4,  /* Compatibility WPEG                  */
+	AltWPEG       = 5,  /* Second Planar WPEG                  */
+	LookOutAWPEG  = 6,  /* LookOut WPEG                        */
+	LookOutBWPEG  = 7,  /* LookOut WPEG                        */
+} node_type;
+
+static inline int is_WPEG(struct rio_detail *rio){
+	return (rio->type == CompatWPEG || rio->type == AltWPEG ||
+		rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
+}
+
+#endif /* __ASM_MACH_MPPARSE_H */
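
The three packed structures above describe one table.  The layout assumed
below -- header, then num_scal_dev scal_detail entries, then num_rio_dev
rio_detail entries, packed back to back -- is presumably how setup_summit()
consumes it, but the walker here is only a sketch and walk_rio_table() is a
hypothetical name:

	static void walk_rio_table(struct rio_table_hdr *hdr)
	{
		char *p = (char *)hdr + sizeof(*hdr);
		int i;

		for (i = 0; i < hdr->num_scal_dev; i++, p += sizeof(struct scal_detail)) {
			struct scal_detail *scal = (struct scal_detail *)p;
			/* scal->node_id, scal->CBAR, port wiring ... */
		}
		for (i = 0; i < hdr->num_rio_dev; i++, p += sizeof(struct rio_detail)) {
			struct rio_detail *rio = (struct rio_detail *)p;
			if (is_WPEG(rio)) {
				/* a Winnipeg: rio->first_slot says which
				 * PCI slots hang below this expansion port */
			}
		}
	}
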
diff --git a/include/asm-i386/mach-summit/mach_mpspec.h b/include/asm-i386/mach-summit/mach_mpspec.h
new file mode 100644
index 0000000..bd76552
--- /dev/null
+++ b/include/asm-i386/mach-summit/mach_mpspec.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+#define MAX_IRQ_SOURCES 256
+
+/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
+#define MAX_MP_BUSSES 260
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-i386/mach-visws/cobalt.h b/include/asm-i386/mach-visws/cobalt.h
new file mode 100644
index 0000000..33c3622
--- /dev/null
+++ b/include/asm-i386/mach-visws/cobalt.h
@@ -0,0 +1,125 @@
+#ifndef __I386_SGI_COBALT_H
+#define __I386_SGI_COBALT_H
+
+#include <asm/fixmap.h>
+
+/*
+ * Cobalt SGI Visual Workstation system ASIC
+ */ 
+
+#define CO_CPU_NUM_PHYS 0x1e00
+#define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2)
+
+#define CO_CPU_MAX 4
+
+#define	CO_CPU_PHYS		0xc2000000
+#define	CO_APIC_PHYS		0xc4000000
+
+/* see set_fixmap() and asm/fixmap.h */
+#define	CO_CPU_VADDR		(fix_to_virt(FIX_CO_CPU))
+#define	CO_APIC_VADDR		(fix_to_virt(FIX_CO_APIC))
+
+/* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */
+#define	CO_CPU_REV		0x08
+#define	CO_CPU_CTRL		0x10
+#define	CO_CPU_STAT		0x20
+#define	CO_CPU_TIMEVAL		0x30
+
+/* CO_CPU_CTRL bits */
+#define	CO_CTRL_TIMERUN		0x04		/* 0 == disabled */
+#define	CO_CTRL_TIMEMASK	0x08		/* 0 == unmasked */
+
+/* CO_CPU_STATUS bits */
+#define	CO_STAT_TIMEINTR	0x02	/* (r) 1 == int pend, (w) 0 == clear */
+
+/* CO_CPU_TIMEVAL value */
+#define	CO_TIME_HZ		100000000	/* Cobalt core rate */
+
+/* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */
+#define	CO_APIC_HI(n)		(((n) * 0x10) + 4)
+#define	CO_APIC_LO(n)		((n) * 0x10)
+#define	CO_APIC_ID		0x0ffc
+
+/* CO_APIC_ID bits */
+#define	CO_APIC_ENABLE		0x00000100
+
+/* CO_APIC_LO bits */
+#define	CO_APIC_MASK		0x00010000	/* 0 = enabled */
+#define	CO_APIC_LEVEL		0x00008000	/* 0 = edge */
+
+/*
+ * Where things are physically wired to Cobalt
+ * #defines with no board _<type>_<rev>_ are common to all (thus far)
+ */
+#define	CO_APIC_IDE0		4
+#define CO_APIC_IDE1		2		/* Only on 320 */
+
+#define	CO_APIC_8259		12		/* serial, floppy, par-l-l */
+
+/* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */
+#define	CO_APIC_PCIA_BASE0	0 /* and 1 */	/* slot 0, line 0 */
+#define	CO_APIC_PCIA_BASE123	5 /* and 6 */	/* slot 0, line 1 */
+
+#define	CO_APIC_PIIX4_USB	7		/* this one is weird */
+
+/* Lithium PCI Bridge B -- "the one with PIIX4" */
+#define	CO_APIC_PCIB_BASE0	8 /* and 9-12 */	/* slot 0, line 0 */
+#define	CO_APIC_PCIB_BASE123	13 /* and 14, 15 */	/* slot 0, line 1 */
+
+#define	CO_APIC_VIDOUT0		16
+#define	CO_APIC_VIDOUT1		17
+#define	CO_APIC_VIDIN0		18
+#define	CO_APIC_VIDIN1		19
+
+#define	CO_APIC_LI_AUDIO	22
+
+#define	CO_APIC_AS		24
+#define	CO_APIC_RE		25
+
+#define CO_APIC_CPU		28		/* Timer and Cache interrupt */
+#define	CO_APIC_NMI		29
+#define	CO_APIC_LAST		CO_APIC_NMI
+
+/*
+ * This is how irqs are assigned on the Visual Workstation.
+ * Legacy devices get IRQs 1-15 (the system clock is 0 and is CO_APIC_CPU).
+ * All other devices (including PCI) go to Cobalt and get IRQs 16 and up.
+ */
+#define	CO_IRQ_APIC0	16			/* irq of apic entry 0 */
+#define	IS_CO_APIC(irq)	((irq) >= CO_IRQ_APIC0)
+#define	CO_IRQ(apic)	(CO_IRQ_APIC0 + (apic))	/* apic ent to irq */
+#define	CO_APIC(irq)	((irq) - CO_IRQ_APIC0)	/* irq to apic ent */
+#define CO_IRQ_IDE0	14			/* knowledge of... */
+#define CO_IRQ_IDE1	15			/* ... ide driver defaults! */
+#define	CO_IRQ_8259	CO_IRQ(CO_APIC_8259)
+
+#ifdef CONFIG_X86_VISWS_APIC
+extern __inline void co_cpu_write(unsigned long reg, unsigned long v)
+{
+	*((volatile unsigned long *)(CO_CPU_VADDR+reg))=v;
+}
+
+extern __inline unsigned long co_cpu_read(unsigned long reg)
+{
+	return *((volatile unsigned long *)(CO_CPU_VADDR+reg));
+}            
+             
+extern __inline void co_apic_write(unsigned long reg, unsigned long v)
+{
+	*((volatile unsigned long *)(CO_APIC_VADDR+reg))=v;
+}            
+             
+extern __inline unsigned long co_apic_read(unsigned long reg)
+{
+	return *((volatile unsigned long *)(CO_APIC_VADDR+reg));
+}
+#endif
+
+extern char visws_board_type;
+
+#define	VISWS_320	0
+#define	VISWS_540	1
+
+extern char visws_board_rev;
+
+#endif /* __I386_SGI_COBALT_H */
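
The IRQ <-> APIC-entry macros are a plain offset by CO_IRQ_APIC0; a worked
example whose values follow directly from the defines above:

	int irq = CO_IRQ(CO_APIC_IDE0);		/* 16 + 4 == 20 */
	int ent = CO_APIC(irq);			/* back to entry 4 */
	int is_cobalt = IS_CO_APIC(irq);	/* true: irq >= 16 */
	/* CO_IRQ_IDE0 (14) stays below CO_IRQ_APIC0 because the ide
	 * driver's legacy ISA defaults are preserved, per the comment. */
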
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
new file mode 100644
index 0000000..33acd50
--- /dev/null
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -0,0 +1,52 @@
+/* defines for inline arch setup functions */
+
+#include <asm/fixmap.h>
+#include "cobalt.h"
+
+static inline void do_timer_interrupt_hook(struct pt_regs *regs)
+{
+	/* Clear the interrupt */
+	co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);
+
+	do_timer(regs);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(regs));
+#endif
+/*
+ * In the SMP case we use the local APIC timer interrupt to do the
+ * profiling, except when we simulate SMP mode on a uniprocessor
+ * system, in that case we have to call the local interrupt handler.
+ */
+#ifndef CONFIG_X86_LOCAL_APIC
+	profile_tick(CPU_PROFILING, regs);
+#else
+	if (!using_apic_timer)
+		smp_local_timer_interrupt(regs);
+#endif
+}
+
+static inline int do_timer_overflow(int count)
+{
+	int i;
+
+	spin_lock(&i8259A_lock);
+	/*
+	 * This is tricky when I/O APICs are used;
+	 * see do_timer_interrupt().
+	 */
+	i = inb(0x20);
+	spin_unlock(&i8259A_lock);
+	
+	/* assumption about timer being IRQ0 */
+	if (i & 0x01) {
+		/*
+		 * We cannot detect lost timer interrupts ... 
+		 * well, that's why we call them lost, don't we? :)
+		 * [hmm, on the Pentium and Alpha we can ... sort of]
+		 */
+		count -= LATCH;
+	} else {
+		printk("do_slow_gettimeoffset(): hardware timer problem?\n");
+	}
+	return count;
+}
diff --git a/include/asm-i386/mach-visws/entry_arch.h b/include/asm-i386/mach-visws/entry_arch.h
new file mode 100644
index 0000000..b183fa6
--- /dev/null
+++ b/include/asm-i386/mach-visws/entry_arch.h
@@ -0,0 +1,23 @@
+/*
+ * The following vectors are part of the Linux architecture; there
+ * is no hardware IRQ pin equivalent for them.  They are triggered
+ * through the ICC by us (IPIs)
+ */
+#ifdef CONFIG_X86_SMP
+BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
+BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
+BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+#endif
+
+/*
+ * Every Pentium local APIC has two 'local interrupts', with a
+ * soft-definable vector attached to each: one is used as a timer
+ * interrupt, the other for error counter overflow.  Linux uses
+ * the local APIC timer interrupt to get a much simpler SMP time
+ * architecture:
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
+BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
+BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+#endif
diff --git a/include/asm-i386/mach-visws/irq_vectors.h b/include/asm-i386/mach-visws/irq_vectors.h
new file mode 100644
index 0000000..cb572d8
--- /dev/null
+++ b/include/asm-i386/mach-visws/irq_vectors.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR	0x20
+
+#define SYSCALL_VECTOR		0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ *  Some of the following vectors are 'rare'; they are merged
+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ *  TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR	0xff
+#define ERROR_APIC_VECTOR	0xfe
+#define INVALIDATE_TLB_VECTOR	0xfd
+#define RESCHEDULE_VECTOR	0xfc
+#define CALL_FUNCTION_VECTOR	0xfb
+
+#define THERMAL_APIC_VECTOR	0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR	0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR	0x31
+#define FIRST_SYSTEM_VECTOR	0xef
+
+#define TIMER_IRQ 0
+
+/*
+ * IRQ definitions
+ */
+#define NR_VECTORS 256
+#define NR_IRQS 224
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define FPU_IRQ			13
+
+#define	FIRST_VM86_IRQ		3
+#define LAST_VM86_IRQ		15
+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
+
+#endif /* _ASM_IRQ_VECTORS_H */
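
The "start at 0x31" remark refers to how device vectors are handed out:
the i386 allocator steps by 8 so consecutive devices land on different
APIC priority levels, skips the syscall vector, and restarts one slot
higher after each wrap.  A simplified sketch -- the exact stepping is
modelled on assign_irq_vector() and is an assumption here, not part of
this header:

	static int next_device_vector(void)
	{
		static int current_vector = FIRST_DEVICE_VECTOR;
		static int offset;

		current_vector += 8;			/* next priority level */
		if (current_vector == SYSCALL_VECTOR)	/* never hand out 0x80 */
			current_vector += 8;
		if (current_vector > FIRST_SYSTEM_VECTOR) {
			offset++;			/* wrapped: shift start */
			current_vector = FIRST_DEVICE_VECTOR + offset;
		}
		return current_vector;
	}
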
diff --git a/include/asm-i386/mach-visws/lithium.h b/include/asm-i386/mach-visws/lithium.h
new file mode 100644
index 0000000..d443e68
--- /dev/null
+++ b/include/asm-i386/mach-visws/lithium.h
@@ -0,0 +1,53 @@
+#ifndef __I386_SGI_LITHIUM_H
+#define __I386_SGI_LITHIUM_H
+
+#include <asm/fixmap.h>
+
+/*
+ * Lithium is the SGI Visual Workstation I/O ASIC
+ */
+
+#define	LI_PCI_A_PHYS		0xfc000000	/* Enet is dev 3 */
+#define	LI_PCI_B_PHYS		0xfd000000	/* PIIX4 is here */
+
+/* see set_fixmap() and asm/fixmap.h */
+#define LI_PCIA_VADDR   (fix_to_virt(FIX_LI_PCIA))
+#define LI_PCIB_VADDR   (fix_to_virt(FIX_LI_PCIB))
+
+/* Not a standard PCI? (not in linux/pci.h) */
+#define	LI_PCI_BUSNUM	0x44			/* lo8: primary, hi8: sub */
+#define LI_PCI_INTEN    0x46
+
+/* LI_PCI_INTEN bits */
+#define	LI_INTA_0	0x0001
+#define	LI_INTA_1	0x0002
+#define	LI_INTA_2	0x0004
+#define	LI_INTA_3	0x0008
+#define	LI_INTA_4	0x0010
+#define	LI_INTB		0x0020
+#define	LI_INTC		0x0040
+#define	LI_INTD		0x0080
+
+/* More special purpose macros... */
+extern __inline void li_pcia_write16(unsigned long reg, unsigned short v)
+{
+	*((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v;
+}
+
+extern __inline unsigned short li_pcia_read16(unsigned long reg)
+{
+	 return *((volatile unsigned short *)(LI_PCIA_VADDR+reg));
+}
+
+extern __inline void li_pcib_write16(unsigned long reg, unsigned short v)
+{
+	*((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v;
+}
+
+extern __inline unsigned short li_pcib_read16(unsigned long reg)
+{
+	return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
+}
+
+#endif
+
diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h
new file mode 100644
index 0000000..4e6cdfb
--- /dev/null
+++ b/include/asm-i386/mach-visws/mach_apic.h
@@ -0,0 +1,100 @@
+#ifndef __ASM_MACH_APIC_H
+#define __ASM_MACH_APIC_H
+
+#include <mach_apicdef.h>
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE	(APIC_DFR_FLAT)
+
+#define no_balance_irq (0)
+#define esr_disable (0)
+
+#define NO_IOAPIC_CHECK (0)
+
+#define INT_DELIVERY_MODE dest_LowestPrio
+#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
+
+#ifdef CONFIG_SMP
+ #define TARGET_CPUS cpu_online_map
+#else
+ #define TARGET_CPUS cpumask_of_cpu(0)
+#endif
+
+#define check_apicid_used(bitmap, apicid)	physid_isset(apicid, bitmap)
+#define check_apicid_present(bit)		physid_isset(bit, phys_cpu_present_map)
+
+static inline int apic_id_registered(void)
+{
+	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static inline void init_apic_ldr(void)
+{
+	unsigned long val;
+
+	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+	apic_write_around(APIC_LDR, val);
+}
+
+static inline void summit_check(char *oem, char *productid) 
+{
+}
+
+static inline void clustered_apic_check(void)
+{
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int cpu_to_logical_apicid(int cpu)
+{
+	return 1 << cpu;
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+	if (mps_cpu < get_physical_broadcast())
+		return mps_cpu;
+	else
+		return BAD_APICID;
+}
+
+static inline physid_mask_t apicid_to_cpu_present(int apicid)
+{
+	return physid_mask_of_physid(apicid);
+}
+
+#define WAKE_SECONDARY_VIA_INIT
+
+static inline void setup_portio_remap(void)
+{
+}
+
+static inline void enable_apic_mode(void)
+{
+}
+
+static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	return cpus_addr(cpumask)[0];
+}
+
+static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
+{
+	return cpuid_apic >> index_msb;
+}
+
+#endif /* __ASM_MACH_APIC_H */
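
Contrast with the Summit version earlier: in flat logical mode each CPU
simply owns bit n of the 8-bit logical destination, so a cpumask word is
already a valid destination and cpu_mask_to_apicid() can return it
unchanged.  A worked example:

	unsigned int dest = cpu_to_logical_apicid(0)
			  | cpu_to_logical_apicid(2);	/* 0x01 | 0x04 == 0x05 */
	/* cpus_addr(cpumask)[0] for a mask containing cpus 0 and 2 is
	 * likewise 0x05, so both paths agree. */
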
diff --git a/include/asm-i386/mach-visws/mach_apicdef.h b/include/asm-i386/mach-visws/mach_apicdef.h
new file mode 100644
index 0000000..826cfa9
--- /dev/null
+++ b/include/asm-i386/mach-visws/mach_apicdef.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_MACH_APICDEF_H
+#define __ASM_MACH_APICDEF_H
+
+#define		APIC_ID_MASK		(0xF<<24)
+
+static inline unsigned get_apic_id(unsigned long x)
+{
+	return (((x)>>24)&0xF);
+}
+
+#define		GET_APIC_ID(x)	get_apic_id(x)
+
+#endif
diff --git a/include/asm-i386/mach-visws/piix4.h b/include/asm-i386/mach-visws/piix4.h
new file mode 100644
index 0000000..83ea4f4
--- /dev/null
+++ b/include/asm-i386/mach-visws/piix4.h
@@ -0,0 +1,107 @@
+#ifndef __I386_SGI_PIIX_H
+#define __I386_SGI_PIIX_H
+
+/*
+ * PIIX4 as used on SGI Visual Workstations
+ */
+
+#define	PIIX_PM_START		0x0F80
+
+#define	SIO_GPIO_START		0x0FC0
+
+#define	SIO_PM_START		0x0FC8
+
+#define	PMBASE			PIIX_PM_START
+#define	GPIREG0			(PMBASE+0x30)
+#define	GPIREG(x)		(GPIREG0+((x)/8))
+#define	GPIBIT(x)		(1 << ((x)%8))
+
+#define	PIIX_GPI_BD_ID1		18
+#define	PIIX_GPI_BD_ID2		19
+#define	PIIX_GPI_BD_ID3		20
+#define	PIIX_GPI_BD_ID4		21
+#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)
+#define	PIIX_GPI_BD_MASK	(GPIBIT(PIIX_GPI_BD_ID1) | \
+				GPIBIT(PIIX_GPI_BD_ID2) | \
+				GPIBIT(PIIX_GPI_BD_ID3) | \
+				GPIBIT(PIIX_GPI_BD_ID4) )
+
+#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)
+
+#define	SIO_INDEX		0x2e
+#define	SIO_DATA		0x2f
+
+#define	SIO_DEV_SEL		0x7
+#define	SIO_DEV_ENB		0x30
+#define	SIO_DEV_MSB		0x60
+#define	SIO_DEV_LSB		0x61
+
+#define	SIO_GP_DEV		0x7
+
+#define	SIO_GP_BASE		SIO_GPIO_START
+#define	SIO_GP_MSB		(SIO_GP_BASE>>8)
+#define	SIO_GP_LSB		(SIO_GP_BASE&0xff)
+
+#define	SIO_GP_DATA1		(SIO_GP_BASE+0)
+
+#define	SIO_PM_DEV		0x8
+
+#define	SIO_PM_BASE		SIO_PM_START
+#define	SIO_PM_MSB		(SIO_PM_BASE>>8)
+#define	SIO_PM_LSB		(SIO_PM_BASE&0xff)
+#define	SIO_PM_INDEX		(SIO_PM_BASE+0)
+#define	SIO_PM_DATA		(SIO_PM_BASE+1)
+
+#define	SIO_PM_FER2		0x1
+
+#define	SIO_PM_GP_EN		0x80
+
+
+
+/*
+ * This is the dev/reg where generating a config cycle will
+ * result in a PCI special cycle.
+ */
+#define SPECIAL_DEV		0xff
+#define SPECIAL_REG		0x00
+
+/*
+ * PIIX4 needs to see a special cycle with the following data
+ * to be convinced the processor has gone into the stop grant
+ * state.  PIIX4 insists on seeing this before it will power
+ * down a system.
+ */
+#define PIIX_SPECIAL_STOP		0x00120002
+
+#define PIIX4_RESET_PORT	0xcf9
+#define PIIX4_RESET_VAL		0x6
+
+#define PMSTS_PORT		0xf80	// 2 bytes	PM Status
+#define PMEN_PORT		0xf82	// 2 bytes	PM Enable
+#define	PMCNTRL_PORT		0xf84	// 2 bytes	PM Control
+
+#define PM_SUSPEND_ENABLE	0x2000	// start sequence to suspend state
+
+/*
+ * PMSTS and PMEN I/O bit definitions.
+ * (Bits are the same in both registers)
+ */
+#define PM_STS_RSM		(1<<15)	// Resume Status
+#define PM_STS_PWRBTNOR		(1<<11)	// Power Button Override
+#define PM_STS_RTC		(1<<10)	// RTC status
+#define PM_STS_PWRBTN		(1<<8)	// Power Button Pressed?
+#define PM_STS_GBL		(1<<5)	// Global Status
+#define PM_STS_BM		(1<<4)	// Bus Master Status
+#define PM_STS_TMROF		(1<<0)	// Timer Overflow Status.
+
+/*
+ * Stop clock GPI register
+ */
+#define PIIX_GPIREG0			(0xf80 + 0x30)
+
+/*
+ * Stop clock GPI bit in GPIREG0
+ */
+#define	PIIX_GPI_STPCLK		0x4	// STPCLK signal routed back in
+
+#endif
diff --git a/include/asm-i386/mach-visws/setup_arch_post.h b/include/asm-i386/mach-visws/setup_arch_post.h
new file mode 100644
index 0000000..cdbd895
--- /dev/null
+++ b/include/asm-i386/mach-visws/setup_arch_post.h
@@ -0,0 +1,49 @@
+/* Hook for machine specific memory setup.
+ *
+ * This is included late in kernel/setup.c so that it can make use of all of
+ * the static functions. */
+
+#define MB (1024 * 1024)
+
+unsigned long sgivwfb_mem_phys;
+unsigned long sgivwfb_mem_size;
+
+long long mem_size __initdata = 0;
+
+static char * __init machine_specific_memory_setup(void)
+{
+	long long gfx_mem_size = 8 * MB;
+
+	mem_size = ALT_MEM_K;
+
+	if (!mem_size) {
+		printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it!\n");
+		mem_size = 128 * MB;
+	}
+
+	/*
+	 * this hardcodes the graphics memory to 8 MB;
+	 * it really should be sized dynamically (or at least
+	 * set as a boot param)
+	 */
+	if (!sgivwfb_mem_size) {
+		printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n");
+		sgivwfb_mem_size = 8 * MB;
+	}
+
+	/*
+	 * Trim to nearest MB
+	 */
+	sgivwfb_mem_size &= ~((1 << 20) - 1);
+	sgivwfb_mem_phys = mem_size - gfx_mem_size;
+
+	add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+	add_memory_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM);
+	add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);
+
+	return "PROM";
+
+	/* Unreachable: referenced only to silence gcc's
+	 * "defined but not used" warnings for these helpers. */
+	(void) sanitize_e820_map(NULL, NULL);
+	(void) copy_e820_map(NULL, 0);
+}
diff --git a/include/asm-i386/mach-visws/setup_arch_pre.h b/include/asm-i386/mach-visws/setup_arch_pre.h
new file mode 100644
index 0000000..b92d6d9
--- /dev/null
+++ b/include/asm-i386/mach-visws/setup_arch_pre.h
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+/* no action for visws */
+
+#define ARCH_SETUP
diff --git a/include/asm-i386/mach-visws/smpboot_hooks.h b/include/asm-i386/mach-visws/smpboot_hooks.h
new file mode 100644
index 0000000..d926471
--- /dev/null
+++ b/include/asm-i386/mach-visws/smpboot_hooks.h
@@ -0,0 +1,24 @@
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+	CMOS_WRITE(0xa, 0xf);
+	local_flush_tlb();
+	Dprintk("1.\n");
+	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+	Dprintk("2.\n");
+	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+	Dprintk("3.\n");
+}
+
+/* for visws do nothing for any of these */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+}
+
+static inline void smpboot_setup_io_apic(void)
+{
+}
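
The two trampoline stores above set up the BIOS warm-reset path: CMOS
register 0xf is set to the shutdown code 0xa (conventionally "jump to the
pointer at 40:67"), and start_eip is split into a real-mode segment:offset
pair.  For a hypothetical trampoline at 0x9f000:

	/* segment = 0x9f000 >> 4  == 0x9f00  (stored at TRAMPOLINE_HIGH)
	 * offset  = 0x9f000 & 0xf == 0x0    (stored at TRAMPOLINE_LOW)
	 * so an AP coming out of INIT resumes execution at 9f00:0000. */
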
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
new file mode 100644
index 0000000..ae510e5
--- /dev/null
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -0,0 +1,25 @@
+/* defines for inline arch setup functions */
+#include <asm/voyager.h>
+
+static inline void do_timer_interrupt_hook(struct pt_regs *regs)
+{
+	do_timer(regs);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(regs));
+#endif
+
+	voyager_timer_interrupt(regs);
+}
+
+static inline int do_timer_overflow(int count)
+{
+	/* can't read the ISR; just assume a 1-tick
+	   overflow */
+	if(count > LATCH || count < 0) {
+		printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH);
+		count = LATCH;
+	}
+	count -= LATCH;
+
+	return count;
+}
diff --git a/include/asm-i386/mach-voyager/entry_arch.h b/include/asm-i386/mach-voyager/entry_arch.h
new file mode 100644
index 0000000..4a1e1e8
--- /dev/null
+++ b/include/asm-i386/mach-voyager/entry_arch.h
@@ -0,0 +1,26 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* Copyright (C) 2002
+ *
+ * Author: James.Bottomley@HansenPartnership.com
+ *
+ * linux/arch/i386/voyager/entry_arch.h
+ *
+ * This file builds the VIC and QIC CPI gates
+ */
+
+/* initialise the voyager interrupt gates 
+ *
+ * This uses the macros in irq.h to set up assembly jump gates.  The
+ * calls are then redirected to the same routine with smp_ prefixed */
+BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
+BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
+BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
+
+/* do all the QIC interrupts */
+BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
+BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
+BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
+BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
+BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
+
diff --git a/include/asm-i386/mach-voyager/irq_vectors.h b/include/asm-i386/mach-voyager/irq_vectors.h
new file mode 100644
index 0000000..165421f
--- /dev/null
+++ b/include/asm-i386/mach-voyager/irq_vectors.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* Copyright (C) 2002
+ *
+ * Author: James.Bottomley@HansenPartnership.com
+ *
+ * linux/arch/i386/voyager/irq_vectors.h
+ *
+ * This file provides definitions for the VIC and QIC CPIs
+ */
+
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR	0x20
+
+#define SYSCALL_VECTOR		0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/* These define the CPIs we use in linux */
+#define VIC_CPI_LEVEL0			0
+#define VIC_CPI_LEVEL1			1
+/* now the fake CPIs */
+#define VIC_TIMER_CPI			2
+#define VIC_INVALIDATE_CPI		3
+#define VIC_RESCHEDULE_CPI		4
+#define VIC_ENABLE_IRQ_CPI		5
+#define VIC_CALL_FUNCTION_CPI		6
+
+/* Now the QIC CPIs:  since we don't need the initial levels,
+ * these are QIC_CPI_OFFSET (i.e. 1) less than the VIC CPIs */
+#define QIC_CPI_OFFSET			1
+#define QIC_TIMER_CPI			(VIC_TIMER_CPI - QIC_CPI_OFFSET)
+#define QIC_INVALIDATE_CPI		(VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
+#define QIC_RESCHEDULE_CPI		(VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
+#define QIC_ENABLE_IRQ_CPI		(VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_CPI		(VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+
+#define VIC_START_FAKE_CPI		VIC_TIMER_CPI
+#define VIC_END_FAKE_CPI		VIC_CALL_FUNCTION_CPI
+
+/* this is the SYS_INT CPI. */
+#define VIC_SYS_INT			8
+#define VIC_CMN_INT			15
+
+/* This is the boot CPI for alternate processors.  It gets overwritten
+ * by the above once the system has activated all available processors */
+#define VIC_CPU_BOOT_CPI		VIC_CPI_LEVEL0
+#define VIC_CPU_BOOT_ERRATA_CPI		(VIC_CPI_LEVEL0 + 8)
+
+#define NR_VECTORS 256
+#define NR_IRQS 224
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define FPU_IRQ				13
+
+#define	FIRST_VM86_IRQ		3
+#define LAST_VM86_IRQ		15
+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
+
+#ifndef __ASSEMBLY__
+extern asmlinkage void vic_cpi_interrupt(void);
+extern asmlinkage void vic_sys_interrupt(void);
+extern asmlinkage void vic_cmn_interrupt(void);
+extern asmlinkage void qic_timer_interrupt(void);
+extern asmlinkage void qic_invalidate_interrupt(void);
+extern asmlinkage void qic_reschedule_interrupt(void);
+extern asmlinkage void qic_enable_irq_interrupt(void);
+extern asmlinkage void qic_call_function_interrupt(void);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-i386/mach-voyager/setup_arch_post.h b/include/asm-i386/mach-voyager/setup_arch_post.h
new file mode 100644
index 0000000..f6f6c2c
--- /dev/null
+++ b/include/asm-i386/mach-voyager/setup_arch_post.h
@@ -0,0 +1,73 @@
+/* Hook for machine specific memory setup.
+ *
+ * This is included late in kernel/setup.c so that it can make use of all of
+ * the static functions. */
+
+static char * __init machine_specific_memory_setup(void)
+{
+	char *who;
+
+	who = "NOT VOYAGER";
+
+	if(voyager_level == 5) {
+		__u32 addr, length;
+		int i;
+
+		who = "Voyager-SUS";
+
+		e820.nr_map = 0;
+		for(i=0; voyager_memory_detect(i, &addr, &length); i++) {
+			add_memory_region(addr, length, E820_RAM);
+		}
+		return who;
+	} else if(voyager_level == 4) {
+		__u32 tom;
+		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
+		/* select the DINO config space */
+		outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
+		/* Read DINO top of memory register */
+		tom = ((inb(catbase + 0x4) & 0xf0) << 16)
+			+ ((inb(catbase + 0x5) & 0x7f) << 24);
+
+		if(inb(catbase) != VOYAGER_DINO) {
+			printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
+			tom = (EXT_MEM_K)<<10;
+		}
+		who = "Voyager-TOM";
+		add_memory_region(0, 0x9f000, E820_RAM);
+		/* map from 1M to top of memory */
+		add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
+		/* FIXME: Should check the ASICs to see if I need to
+		 * take out the 8M window.  Just do it at the moment
+		 * */
+		add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
+		return who;
+	}
+
+	who = "BIOS-e820";
+
+	/*
+	 * Try to copy the BIOS-supplied E820-map.
+	 *
+	 * Otherwise fake a memory map; one section from 0k->640k,
+	 * the next section from 1mb->appropriate_mem_k
+	 */
+	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
+	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
+		unsigned long mem_size;
+
+		/* compare results from other methods and take the greater */
+		if (ALT_MEM_K < EXT_MEM_K) {
+			mem_size = EXT_MEM_K;
+			who = "BIOS-88";
+		} else {
+			mem_size = ALT_MEM_K;
+			who = "BIOS-e801";
+		}
+
+		e820.nr_map = 0;
+		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
+	}
+	return who;
+}
diff --git a/include/asm-i386/mach-voyager/setup_arch_pre.h b/include/asm-i386/mach-voyager/setup_arch_pre.h
new file mode 100644
index 0000000..48f7e6f
--- /dev/null
+++ b/include/asm-i386/mach-voyager/setup_arch_pre.h
@@ -0,0 +1,10 @@
+#include <asm/voyager.h>
+#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *)(PARAM+0x40))
+
+/* Hook to call BIOS initialisation function */
+
+/* for voyager, pass the voyager BIOS/SUS info area to the detection 
+ * routines */
+
+#define ARCH_SETUP	voyager_detect(VOYAGER_BIOS_INFO);
+
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
new file mode 100644
index 0000000..697673b
--- /dev/null
+++ b/include/asm-i386/math_emu.h
@@ -0,0 +1,35 @@
+#ifndef _I386_MATH_EMU_H
+#define _I386_MATH_EMU_H
+
+#include <asm/sigcontext.h>
+
+int restore_i387_soft(void *s387, struct _fpstate __user *buf);
+int save_i387_soft(void *s387, struct _fpstate __user *buf);
+
+/* This structure matches the layout of the data saved to the stack
+   following a device-not-present interrupt, part of it saved
+   automatically by the 80386/80486.
+   */
+struct info {
+	long ___orig_eip;
+	long ___ebx;
+	long ___ecx;
+	long ___edx;
+	long ___esi;
+	long ___edi;
+	long ___ebp;
+	long ___eax;
+	long ___ds;
+	long ___es;
+	long ___orig_eax;
+	long ___eip;
+	long ___cs;
+	long ___eflags;
+	long ___esp;
+	long ___ss;
+	long ___vm86_es; /* This and the following only in vm86 mode */
+	long ___vm86_ds;
+	long ___vm86_fs;
+	long ___vm86_gs;
+};
+#endif
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
new file mode 100644
index 0000000..99a8900
--- /dev/null
+++ b/include/asm-i386/mc146818rtc.h
@@ -0,0 +1,94 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/mc146818rtc.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x)	(0x70 + (x))
+#define RTC_ALWAYS_BCD	1	/* RTC operates in binary mode */
+#endif
+
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This lock provides NMI-safe access to the CMOS/RTC registers.  It has
+ * some special properties.  It is owned by a CPU and stores the index
+ * register currently being accessed (if owned).  The idea here is that it
+ * works like a normal lock (normally).  However, in an NMI, the NMI code
+ * will first check to see if its CPU owns the lock, which would mean the
+ * NMI interrupted a read/write of the device.  If it does, it goes ahead
+ * and performs the access and then restores the index register.  If it
+ * does not, it locks normally.
+ *
+ * Note that since we are working with NMIs, we need this lock even in
+ * a non-SMP machine just to mark that the lock is owned.
+ *
+ * This only works with compare-and-swap.  There is no other way to
+ * atomically claim the lock and set the owner.
+ */
+#include <linux/smp.h>
+extern volatile unsigned long cmos_lock;
+
+/*
+ * All of these below must be called with interrupts off, preempt
+ * disabled, etc.
+ */
+
+static inline void lock_cmos(unsigned char reg)
+{
+	unsigned long new;
+	new = ((smp_processor_id()+1) << 8) | reg;
+	for (;;) {
+		if (cmos_lock)
+			continue;
+		if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
+			return;
+	}
+}
+
+static inline void unlock_cmos(void)
+{
+	cmos_lock = 0;
+}
+static inline int do_i_have_lock_cmos(void)
+{
+	return (cmos_lock >> 8) == (smp_processor_id()+1);
+}
+static inline unsigned char current_lock_cmos_reg(void)
+{
+	return cmos_lock & 0xff;
+}
+#define lock_cmos_prefix(reg) \
+	do {					\
+		unsigned long cmos_flags;	\
+		local_irq_save(cmos_flags);	\
+		lock_cmos(reg)
+#define lock_cmos_suffix(reg) \
+		unlock_cmos();			\
+		local_irq_restore(cmos_flags);	\
+	} while (0)
+#else
+#define lock_cmos_prefix(reg) do {} while (0)
+#define lock_cmos_suffix(reg) do {} while (0)
+#define lock_cmos(reg)
+#define unlock_cmos()
+#define do_i_have_lock_cmos() 0
+#define current_lock_cmos_reg() 0
+#endif
+
+/*
+ * The machines supported so far all access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) rtc_cmos_read(addr)
+#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr)
+unsigned char rtc_cmos_read(unsigned char addr);
+void rtc_cmos_write(unsigned char val, unsigned char addr);
+
+#define RTC_IRQ 8
+
+#endif /* _ASM_MC146818RTC_H */
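
rtc_cmos_read() and rtc_cmos_write() themselves live in the timer code; a
plausible implementation is shown here only to illustrate how the
lock_cmos_prefix()/lock_cmos_suffix() pair brackets the classic
index-then-data access through ports 0x70/0x71:

	unsigned char rtc_cmos_read(unsigned char addr)
	{
		unsigned char val;

		lock_cmos_prefix(addr);		/* irqs off + NMI-aware lock */
		outb_p(addr, RTC_PORT(0));	/* select the index register */
		val = inb_p(RTC_PORT(1));	/* read the data register */
		lock_cmos_suffix(addr);
		return val;
	}
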
diff --git a/include/asm-i386/mca.h b/include/asm-i386/mca.h
new file mode 100644
index 0000000..09adf2e
--- /dev/null
+++ b/include/asm-i386/mca.h
@@ -0,0 +1,43 @@
+/* -*- mode: c; c-basic-offset: 8 -*- */
+
+/* Platform specific MCA defines */
+#ifndef _ASM_MCA_H
+#define _ASM_MCA_H
+
+/* Maximal number of MCA slots - actually, some machines have less, but
+ * they all have sufficient number of POS registers to cover 8.
+ */
+#define MCA_MAX_SLOT_NR  8
+
+/* Most machines have only one MCA bus.  The only multiple bus machines
+ * I know have at most two */
+#define MAX_MCA_BUSSES 2
+
+#define MCA_PRIMARY_BUS		0
+#define MCA_SECONDARY_BUS	1
+
+/* Dummy slot numbers on primary MCA for integrated functions */
+#define MCA_INTEGSCSI	(MCA_MAX_SLOT_NR)
+#define MCA_INTEGVIDEO	(MCA_MAX_SLOT_NR+1)
+#define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2)
+
+/* Dummy POS values for integrated functions */
+#define MCA_DUMMY_POS_START	0x10000
+#define MCA_INTEGSCSI_POS	(MCA_DUMMY_POS_START+1)
+#define MCA_INTEGVIDEO_POS	(MCA_DUMMY_POS_START+2)
+#define MCA_MOTHERBOARD_POS	(MCA_DUMMY_POS_START+3)
+
+/* MCA registers */
+
+#define MCA_MOTHERBOARD_SETUP_REG	0x94
+#define MCA_ADAPTER_SETUP_REG		0x96
+#define MCA_POS_REG(n)			(0x100+(n))
+
+#define MCA_ENABLED	0x01	/* POS 2, set if adapter enabled */
+
+/* Max number of adapters, including both slots and various integrated
+ * things.
+ */
+#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
+
+#endif
diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h
new file mode 100644
index 0000000..4b3b526
--- /dev/null
+++ b/include/asm-i386/mca_dma.h
@@ -0,0 +1,202 @@
+#ifndef MCA_DMA_H
+#define MCA_DMA_H
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+
+/*
+ * Microchannel specific DMA stuff.  DMA on an MCA machine is fairly similar to
+ *   standard PC dma, but it certainly has its quirks.  DMA register addresses
+ *   are in a different place and there are some added functions.  Most of this
+ *   should be pretty obvious on inspection.  Note that the user must divide
+ *   count by 2 when using 16-bit dma; that is not handled by these functions.
+ *
+ * Ramen Noodles are yummy.
+ * 
+ *  1998 Tymm Twillman <tymm@computer.org>  
+ */
+
+/*
+ * Registers that are used by the DMA controller; FN is the function register 
+ *   (tell the controller what to do) and EXE is the execution register (how
+ *   to do it)
+ */
+
+#define MCA_DMA_REG_FN  0x18
+#define MCA_DMA_REG_EXE 0x1A 
+
+/*
+ * Functions that the DMA controller can do
+ */
+
+#define MCA_DMA_FN_SET_IO       0x00
+#define MCA_DMA_FN_SET_ADDR     0x20
+#define MCA_DMA_FN_GET_ADDR     0x30
+#define MCA_DMA_FN_SET_COUNT    0x40
+#define MCA_DMA_FN_GET_COUNT    0x50
+#define MCA_DMA_FN_GET_STATUS   0x60
+#define MCA_DMA_FN_SET_MODE     0x70
+#define MCA_DMA_FN_SET_ARBUS    0x80
+#define MCA_DMA_FN_MASK         0x90
+#define MCA_DMA_FN_RESET_MASK   0xA0
+#define MCA_DMA_FN_MASTER_CLEAR 0xD0
+
+/*
+ * Modes (used by setting MCA_DMA_FN_MODE in the function register)
+ * 
+ * Note that the MODE_READ is read from memory (write to device), and
+ *   MODE_WRITE is vice-versa.  
+ */
+
+#define MCA_DMA_MODE_XFER  0x04  /* read by default */
+#define MCA_DMA_MODE_READ  0x04  /* same as XFER */
+#define MCA_DMA_MODE_WRITE 0x08  /* OR with MODE_XFER to use */
+#define MCA_DMA_MODE_IO    0x01  /* DMA from IO register */
+#define MCA_DMA_MODE_16    0x40  /* 16 bit xfers */
+
+
+/**
+ *	mca_enable_dma	-	enable DMA on the given channel
+ *	@dmanr: DMA channel
+ *
+ *	Enable the MCA bus DMA on a channel. This can be called from
+ *	IRQ context.
+ */
+
+static __inline__ void mca_enable_dma(unsigned int dmanr)
+{
+	outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
+}
+
+/**
+ *	mca_disable_dma	-	disable DMA on the given channel
+ *	@dmanr: DMA channel
+ *
+ *	Disable the MCA bus DMA on a channel. This can be called from
+ *	IRQ context.
+ */
+
+static __inline__ void mca_disable_dma(unsigned int dmanr)
+{
+	outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
+}
+
+/**
+ *	mca_set_dma_addr -	load a 24bit DMA address
+ *	@dmanr: DMA channel
+ *	@a: 24bit bus address
+ *
+ *	Load the address register in the DMA controller. This has a 24bit
+ *	limitation (16Mb). 
+ */
+
+static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
+	outb(a & 0xff, MCA_DMA_REG_EXE);
+	outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
+	outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);
+}
+
+/**
+ *	mca_get_dma_addr -	read back a 24bit DMA address
+ *	@dmanr: DMA channel
+ *
+ *	Read the address register in the DMA controller. This has a 24bit
+ *	limitation (16Mb). The return is a bus address.
+ */
+
+static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
+{
+	unsigned int addr;
+
+	outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
+	addr = inb(MCA_DMA_REG_EXE);
+	addr |= inb(MCA_DMA_REG_EXE) << 8;
+	addr |= inb(MCA_DMA_REG_EXE) << 16;  
+
+	return addr;
+}
+
+/**
+ *	mca_set_dma_count -	load a 16bit transfer count
+ *	@dmanr: DMA channel
+ *	@count: count
+ *
+ *	Set the DMA count for this channel. This can be up to 64Kbytes.
+ *	Setting a count of zero will not do what you expect.
+ */
+
+static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	count--;  /* transfers one more than count -- correct for this */
+
+	outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN);
+	outb(count & 0xff, MCA_DMA_REG_EXE);
+	outb((count >> 8) & 0xff, MCA_DMA_REG_EXE);
+}
+
+/**
+ *	mca_get_dma_residue -	get the remaining bytes to transfer
+ *	@dmanr: DMA channel
+ *
+ *	This function returns the number of bytes left to transfer
+ *	on this DMA channel.
+ */
+
+static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
+{
+	unsigned short count;
+
+	outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN);
+	count = 1 + inb(MCA_DMA_REG_EXE);
+	count += inb(MCA_DMA_REG_EXE) << 8;
+
+	return count;
+}
+
+/**
+ *	mca_set_dma_io -	set the port for an I/O transfer
+ *	@dmanr: DMA channel
+ *	@io_addr: an I/O port number
+ *
+ *	Unlike the ISA bus DMA controllers, DMA on the MCA bus can
+ *	transfer with an I/O port target.
+ */
+
+static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
+{
+	/*
+	 * DMA from a port address -- set the io address
+	 */
+	
+	outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
+	outb(io_addr & 0xff, MCA_DMA_REG_EXE);
+	outb((io_addr >>  8) & 0xff, MCA_DMA_REG_EXE);
+}
+
+/**
+ *	mca_set_dma_mode -	set the DMA mode
+ *	@dmanr: DMA channel
+ *	@mode: mode to set
+ *
+ *	The DMA controller supports several modes. The mode values you can
+ *	set are:
+ *
+ *	%MCA_DMA_MODE_READ when reading from the DMA device.
+ *
+ *	%MCA_DMA_MODE_WRITE when writing to the DMA device.
+ *
+ *	%MCA_DMA_MODE_IO to do DMA to or from an I/O port.
+ *
+ *	%MCA_DMA_MODE_16 to do 16bit transfers.
+ *
+ */
+
+static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+	outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
+	outb(mode, MCA_DMA_REG_EXE);
+}
+
+#endif /* MCA_DMA_H */
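
Putting the helpers together: a hypothetical driver programming channel 5
for a 16-bit memory-to-device transfer (MCA_DMA_MODE_READ reads from
memory, per the note near the top of the file).  buf_phys and len are
placeholders, and the earlier caveat applies: 16-bit transfers count
words, so the byte length is halved:

	mca_disable_dma(5);			/* mask while programming */
	mca_set_dma_addr(5, buf_phys);		/* 24-bit bus address */
	mca_set_dma_count(5, len / 2);		/* words, not bytes */
	mca_set_dma_mode(5, MCA_DMA_MODE_READ | MCA_DMA_MODE_16);
	mca_enable_dma(5);
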
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
new file mode 100644
index 0000000..196619a
--- /dev/null
+++ b/include/asm-i386/mman.h
@@ -0,0 +1,43 @@
+#ifndef __I386_MMAN_H__
+#define __I386_MMAN_H__
+
+#define PROT_READ	0x1		/* page can be read */
+#define PROT_WRITE	0x2		/* page can be written */
+#define PROT_EXEC	0x4		/* page can be executed */
+#define PROT_SEM	0x8		/* page may be used for atomic ops */
+#define PROT_NONE	0x0		/* page can not be accessed */
+#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_TYPE	0x0f		/* Mask for type of mapping */
+#define MAP_FIXED	0x10		/* Interpret addr exactly */
+#define MAP_ANONYMOUS	0x20		/* don't use a file */
+
+#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
+#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
+#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
+#define MAP_LOCKED	0x2000		/* pages are locked */
+#define MAP_NORESERVE	0x4000		/* don't check for reservations */
+#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
+#define MAP_NONBLOCK	0x10000		/* do not block on IO */
+
+#define MS_ASYNC	1		/* sync memory asynchronously */
+#define MS_INVALIDATE	2		/* invalidate the caches */
+#define MS_SYNC		4		/* synchronous memory sync */
+
+#define MCL_CURRENT	1		/* lock all current mappings */
+#define MCL_FUTURE	2		/* lock all future mappings */
+
+#define MADV_NORMAL	0x0		/* default page-in behavior */
+#define MADV_RANDOM	0x1		/* page-in minimum required */
+#define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
+#define MADV_WILLNEED	0x3		/* pre-fault pages */
+#define MADV_DONTNEED	0x4		/* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON	MAP_ANONYMOUS
+#define MAP_FILE	0
+
+#endif /* __I386_MMAN_H__ */
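
These values are what the C library hands straight to the mmap() system
call; a minimal userspace example using the flags above:

	#include <sys/mman.h>

	/* A private, zero-filled mapping not backed by any file: */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p != MAP_FAILED)
		madvise(p, 4096, MADV_SEQUENTIAL);	/* read-ahead hint */
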
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
new file mode 100644
index 0000000..f431a0b
--- /dev/null
+++ b/include/asm-i386/mmu.h
@@ -0,0 +1,17 @@
+#ifndef __i386_MMU_H
+#define __i386_MMU_H
+
+#include <asm/semaphore.h>
+/*
+ * The i386 doesn't have an MMU context, but
+ * we put the segment information here.
+ *
+ * cpu_vm_mask is used to optimize ldt flushing.
+ */
+typedef struct { 
+	int size;
+	struct semaphore sem;
+	void *ldt;
+} mm_context_t;
+
+#endif
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
new file mode 100644
index 0000000..bf08218
--- /dev/null
+++ b/include/asm-i386/mmu_context.h
@@ -0,0 +1,72 @@
+#ifndef __I386_SCHED_H
+#define __I386_SCHED_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	unsigned cpu = smp_processor_id();
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	int cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+
+		/* Re-load page tables */
+		load_cr3(next->pgd);
+
+		/*
+		 * load the LDT, if the LDT is different:
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_LDT_nolock(&next->context, cpu);
+	}
+#ifdef CONFIG_SMP
+	else {
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled 
+			 * tlb flush IPI delivery. We must reload %cr3.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context, cpu);
+		}
+	}
+#endif
+}
+
+#define deactivate_mm(tsk, mm) \
+	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+
+#define activate_mm(prev, next) \
+	switch_mm((prev),(next),NULL)
+
+#endif
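
The lazy-TLB dance in switch_mm() is easiest to follow as a scenario (the
cpu numbers are hypothetical):

	/* 1. cpu1 runs a task with mm X, then switches to a kernel thread:
	 *    enter_lazy_tlb() marks cpu1 TLBSTATE_LAZY; %cr3 still points
	 *    at X's pgd.
	 * 2. Another cpu unmaps pages in X and sends flush IPIs; leave_mm
	 *    drops cpu1 from X's cpu_vm_mask, so cpu1 gets no more IPIs.
	 * 3. cpu1 later switches back to a task using X: prev == next, but
	 *    cpu_test_and_set() sees the cleared bit and reloads %cr3,
	 *    picking up the flushes it skipped while lazy. */
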
diff --git a/include/asm-i386/mmx.h b/include/asm-i386/mmx.h
new file mode 100644
index 0000000..46b71da
--- /dev/null
+++ b/include/asm-i386/mmx.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_MMX_H
+#define _ASM_MMX_H
+
+/*
+ *	MMX 3Dnow! helper operations
+ */
+
+#include <linux/types.h>
+ 
+extern void *_mmx_memcpy(void *to, const void *from, size_t size);
+extern void mmx_clear_page(void *page);
+extern void mmx_copy_page(void *to, void *from);
+
+#endif
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
new file mode 100644
index 0000000..13830ae
--- /dev/null
+++ b/include/asm-i386/mmzone.h
@@ -0,0 +1,147 @@
+/*
+ * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
+ *
+ */
+
+#ifndef _ASM_MMZONE_H_
+#define _ASM_MMZONE_H_
+
+#include <asm/smp.h>
+
+#ifdef CONFIG_DISCONTIGMEM
+
+#ifdef CONFIG_NUMA
+	#ifdef CONFIG_X86_NUMAQ
+		#include <asm/numaq.h>
+	#else	/* summit or generic arch */
+		#include <asm/srat.h>
+	#endif
+#else /* !CONFIG_NUMA */
+	#define get_memcfg_numa get_memcfg_numa_flat
+	#define get_zholes_size(n) (0)
+#endif /* CONFIG_NUMA */
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid)		(node_data[nid])
+
+/*
+ * Generic node memory support; the following assumptions apply:
+ *
+ * 1) memory comes in 256MB contiguous chunks which are either present or not
+ * 2) we will not have more than 64GB in total
+ *
+ * for now assume that 64GB is the maximum amount of RAM for the whole system
+ *    64GB / 4096 bytes/page = 16777216 pages
+ */
+#define MAX_NR_PAGES 16777216
+#define MAX_ELEMENTS 256
+#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
+
+extern s8 physnode_map[];
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+#ifdef CONFIG_NUMA
+	return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Following are macros that are specific to this numa platform.
+ */
+#define reserve_bootmem(addr, size) \
+	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
+#define alloc_bootmem(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_pages(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
+#define alloc_bootmem_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low_pages_node(ignore, x) \
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
+
+#define node_localnr(pfn, nid)		((pfn) - node_data[nid]->node_start_pfn)
+
+/*
+ * Following are macros that each numa implementation must define.
+ */
+
+/*
+ * Given a kernel address, find the home node of the underlying memory.
+ */
+#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
+#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid)						\
+({									\
+	pg_data_t *__pgdat = NODE_DATA(nid);				\
+	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
+})
+
+#define local_mapnr(kvaddr)						\
+({									\
+	unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT;		\
+	(__pfn - node_start_pfn(pfn_to_nid(__pfn)));			\
+})
+
+/* XXX: FIXME -- wli */
+#define kern_addr_valid(kaddr)	(0)
+
+#define pfn_to_page(pfn)						\
+({									\
+	unsigned long __pfn = pfn;					\
+	int __node  = pfn_to_nid(__pfn);				\
+	&node_mem_map(__node)[node_localnr(__pfn,__node)];		\
+})
+
+#define page_to_pfn(pg)							\
+({									\
+	struct page *__page = pg;					\
+	struct zone *__zone = page_zone(__page);			\
+	(unsigned long)(__page - __zone->zone_mem_map)			\
+		+ __zone->zone_start_pfn;				\
+})
+
+#ifdef CONFIG_X86_NUMAQ            /* we have contiguous memory on NUMA-Q */
+#define pfn_valid(pfn)          ((pfn) < num_physpages)
+#else
+static inline int pfn_valid(int pfn)
+{
+	int nid = pfn_to_nid(pfn);
+
+	if (nid >= 0)
+		return (pfn < node_end_pfn(nid));
+	return 0;
+}
+#endif
+
+extern int get_memcfg_numa_flat(void );
+/*
+ * This allows any one NUMA architecture to be compiled
+ * for, and still fall back to the flat function if it
+ * fails.
+ */
+static inline void get_memcfg_numa(void)
+{
+#ifdef CONFIG_X86_NUMAQ
+	if (get_memcfg_numaq())
+		return;
+#elif defined(CONFIG_ACPI_SRAT)
+	if (get_memcfg_from_srat())
+		return;
+#endif
+
+	get_memcfg_numa_flat();
+}
+
+#endif /* CONFIG_DISCONTIGMEM */
+#endif /* _ASM_MMZONE_H_ */
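
The physnode_map arithmetic works out as follows: 64GB of maximum RAM
split into MAX_ELEMENTS (256) entries gives 256MB per element, i.e.
PAGES_PER_ELEMENT = 16777216 / 256 = 65536 4K pages.  A worked example
with a hypothetical physical address:

	/* __pa(kaddr) == 0x30000000 (768MB):
	 *   pfn     = 0x30000000 >> PAGE_SHIFT == 0x30000 (196608)
	 *   element = 196608 / 65536 == 3
	 * so pfn_to_nid() returns physnode_map[3], i.e. whichever node
	 * the 768MB-1GB chunk was assigned to at boot. */
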
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
new file mode 100644
index 0000000..508865e
--- /dev/null
+++ b/include/asm-i386/module.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_I386_MODULE_H
+#define _ASM_I386_MODULE_H
+
+/* x86 is simple */
+struct mod_arch_specific
+{
+};
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+#ifdef CONFIG_M386
+#define MODULE_PROC_FAMILY "386 "
+#elif defined CONFIG_M486
+#define MODULE_PROC_FAMILY "486 "
+#elif defined CONFIG_M586
+#define MODULE_PROC_FAMILY "586 "
+#elif defined CONFIG_M586TSC
+#define MODULE_PROC_FAMILY "586TSC "
+#elif defined CONFIG_M586MMX
+#define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_M686
+#define MODULE_PROC_FAMILY "686 "
+#elif defined CONFIG_MPENTIUMII
+#define MODULE_PROC_FAMILY "PENTIUMII "
+#elif defined CONFIG_MPENTIUMIII
+#define MODULE_PROC_FAMILY "PENTIUMIII "
+#elif defined CONFIG_MPENTIUMM
+#define MODULE_PROC_FAMILY "PENTIUMM "
+#elif defined CONFIG_MPENTIUM4
+#define MODULE_PROC_FAMILY "PENTIUM4 "
+#elif defined CONFIG_MK6
+#define MODULE_PROC_FAMILY "K6 "
+#elif defined CONFIG_MK7
+#define MODULE_PROC_FAMILY "K7 "
+#elif defined CONFIG_MK8
+#define MODULE_PROC_FAMILY "K8 "
+#elif defined CONFIG_X86_ELAN
+#define MODULE_PROC_FAMILY "ELAN "
+#elif defined CONFIG_MCRUSOE
+#define MODULE_PROC_FAMILY "CRUSOE "
+#elif defined CONFIG_MEFFICEON
+#define MODULE_PROC_FAMILY "EFFICEON "
+#elif defined CONFIG_MWINCHIPC6
+#define MODULE_PROC_FAMILY "WINCHIPC6 "
+#elif defined CONFIG_MWINCHIP2
+#define MODULE_PROC_FAMILY "WINCHIP2 "
+#elif defined CONFIG_MWINCHIP3D
+#define MODULE_PROC_FAMILY "WINCHIP3D "
+#elif defined CONFIG_MCYRIXIII
+#define MODULE_PROC_FAMILY "CYRIXIII "
+#elif defined CONFIG_MVIAC3_2
+#define MODULE_PROC_FAMILY "VIAC3-2 "
+#elif defined CONFIG_MGEODE
+#define MODULE_PROC_FAMILY "GEODE "
+#else
+#error unknown processor family
+#endif
+
+#ifdef CONFIG_REGPARM
+#define MODULE_REGPARM "REGPARM "
+#else
+#define MODULE_REGPARM ""
+#endif
+
+#ifdef CONFIG_4KSTACKS
+#define MODULE_STACKSIZE "4KSTACKS "
+#else
+#define MODULE_STACKSIZE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
+
+#endif /* _ASM_I386_MODULE_H */
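
MODULE_ARCH_VERMAGIC works purely by adjacent-string pasting, which is why every component keeps its trailing space. A small sketch of what the resulting string looks like for one assumed configuration (CONFIG_MPENTIUM4 + CONFIG_REGPARM + CONFIG_4KSTACKS):

#include <stdio.h>

/* String pasting as the preprocessor does it for one sample config: */
#define MODULE_PROC_FAMILY "PENTIUM4 "
#define MODULE_REGPARM     "REGPARM "
#define MODULE_STACKSIZE   "4KSTACKS "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE

int main(void)
{
	/* module loading fails if this string doesn't match the kernel's */
	printf("%s\n", MODULE_ARCH_VERMAGIC);	/* "PENTIUM4 REGPARM 4KSTACKS " */
	return 0;
}
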
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
new file mode 100644
index 0000000..d9fafba
--- /dev/null
+++ b/include/asm-i386/mpspec.h
@@ -0,0 +1,81 @@
+#ifndef __ASM_MPSPEC_H
+#define __ASM_MPSPEC_H
+
+#include <linux/cpumask.h>
+#include <asm/mpspec_def.h>
+#include <mach_mpspec.h>
+
+extern int mp_bus_id_to_type [MAX_MP_BUSSES];
+extern int mp_bus_id_to_node [MAX_MP_BUSSES];
+extern int mp_bus_id_to_local [MAX_MP_BUSSES];
+extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
+extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
+
+extern unsigned int boot_cpu_physical_apicid;
+extern int smp_found_config;
+extern void find_smp_config (void);
+extern void get_smp_config (void);
+extern int nr_ioapics;
+extern int apic_version [MAX_APICS];
+extern int mp_irq_entries;
+extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
+extern int mpc_default_type;
+extern unsigned long mp_lapic_addr;
+extern int pic_mode;
+extern int using_apic_timer;
+
+#ifdef CONFIG_ACPI_BOOT
+extern void mp_register_lapic (u8 id, u8 enabled);
+extern void mp_register_lapic_address (u64 address);
+extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
+extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
+extern void mp_config_acpi_legacy_irqs (void);
+extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
+#endif /*CONFIG_ACPI_BOOT*/
+
+#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
+
+struct physid_mask
+{
+	unsigned long mask[PHYSID_ARRAY_SIZE];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+#define physid_set(physid, map)			set_bit(physid, (map).mask)
+#define physid_clear(physid, map)		clear_bit(physid, (map).mask)
+#define physid_isset(physid, map)		test_bit(physid, (map).mask)
+#define physid_test_and_set(physid, map)	test_and_set_bit(physid, (map).mask)
+
+#define physids_and(dst, src1, src2)		bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_or(dst, src1, src2)		bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_clear(map)			bitmap_zero((map).mask, MAX_APICS)
+#define physids_complement(dst, src)		bitmap_complement((dst).mask,(src).mask, MAX_APICS)
+#define physids_empty(map)			bitmap_empty((map).mask, MAX_APICS)
+#define physids_equal(map1, map2)		bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+#define physids_weight(map)			bitmap_weight((map).mask, MAX_APICS)
+#define physids_shift_right(d, s, n)		bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+#define physids_shift_left(d, s, n)		bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+#define physids_coerce(map)			((map).mask[0])
+
+#define physids_promote(physids)						\
+	({									\
+		physid_mask_t __physid_mask = PHYSID_MASK_NONE;			\
+		__physid_mask.mask[0] = physids;				\
+		__physid_mask;							\
+	})
+
+#define physid_mask_of_physid(physid)						\
+	({									\
+		physid_mask_t __physid_mask = PHYSID_MASK_NONE;			\
+		physid_set(physid, __physid_mask);				\
+		__physid_mask;							\
+	})
+
+#define PHYSID_MASK_ALL		{ {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE	{ {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
+
+#endif
+
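physid_mask_t is just a fixed-size bitmap over APIC IDs, with helpers mirroring the cpumask API. A self-contained sketch of the underlying set/test arithmetic (plain C stand-ins, not the kernel's atomic set_bit/test_bit):

#include <stdio.h>
#include <string.h>

#define MAX_APICS 256
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define PHYSID_ARRAY_SIZE ((MAX_APICS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long mask[PHYSID_ARRAY_SIZE]; } physid_mask_t;

static void physid_set(int id, physid_mask_t *map)
{
	map->mask[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

static int physid_isset(int id, const physid_mask_t *map)
{
	return (map->mask[id / BITS_PER_LONG] >> (id % BITS_PER_LONG)) & 1;
}

int main(void)
{
	physid_mask_t present;

	memset(&present, 0, sizeof(present));	/* PHYSID_MASK_NONE */
	physid_set(0, &present);		/* e.g. the boot CPU's APIC ID */
	physid_set(6, &present);
	printf("APIC 6 present: %d, APIC 7 present: %d\n",
	       physid_isset(6, &present), physid_isset(7, &present));
	return 0;
}
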
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
new file mode 100644
index 0000000..a961093
--- /dev/null
+++ b/include/asm-i386/mpspec_def.h
@@ -0,0 +1,188 @@
+#ifndef __ASM_MPSPEC_DEF_H
+#define __ASM_MPSPEC_DEF_H
+
+/*
+ * Structure definitions for SMP machines following the
+ * Intel Multiprocessing Specification 1.1 and 1.4.
+ */
+
+/*
+ * This tag identifies where the SMP configuration
+ * information is. 
+ */
+ 
+#define SMP_MAGIC_IDENT	(('_'<<24)|('P'<<16)|('M'<<8)|'_')
+
+#define MAX_MPC_ENTRY 1024
+#define MAX_APICS      256
+
+struct intel_mp_floating
+{
+	char mpf_signature[4];		/* "_MP_" 			*/
+	unsigned long mpf_physptr;	/* Configuration table address	*/
+	unsigned char mpf_length;	/* Our length (paragraphs)	*/
+	unsigned char mpf_specification;/* Specification version	*/
+	unsigned char mpf_checksum;	/* Checksum (makes sum 0)	*/
+	unsigned char mpf_feature1;	/* Standard or configuration ? 	*/
+	unsigned char mpf_feature2;	/* Bit7 set for IMCR|PIC	*/
+	unsigned char mpf_feature3;	/* Unused (0)			*/
+	unsigned char mpf_feature4;	/* Unused (0)			*/
+	unsigned char mpf_feature5;	/* Unused (0)			*/
+};
+
+struct mp_config_table
+{
+	char mpc_signature[4];
+#define MPC_SIGNATURE "PCMP"
+	unsigned short mpc_length;	/* Size of table */
+	char  mpc_spec;			/* 0x01 */
+	char  mpc_checksum;
+	char  mpc_oem[8];
+	char  mpc_productid[12];
+	unsigned long mpc_oemptr;	/* 0 if not present */
+	unsigned short mpc_oemsize;	/* 0 if not present */
+	unsigned short mpc_oemcount;
+	unsigned long mpc_lapic;	/* APIC address */
+	unsigned long reserved;
+};
+
+/* Followed by entries */
+
+#define	MP_PROCESSOR	0
+#define	MP_BUS		1
+#define	MP_IOAPIC	2
+#define	MP_INTSRC	3
+#define	MP_LINTSRC	4
+#define	MP_TRANSLATION  192  /* Used by IBM NUMA-Q to describe node locality */
+
+struct mpc_config_processor
+{
+	unsigned char mpc_type;
+	unsigned char mpc_apicid;	/* Local APIC number */
+	unsigned char mpc_apicver;	/* Its version */
+	unsigned char mpc_cpuflag;
+#define CPU_ENABLED		1	/* Processor is available */
+#define CPU_BOOTPROCESSOR	2	/* Processor is the BP */
+	unsigned long mpc_cpufeature;		
+#define CPU_STEPPING_MASK 0x0F
+#define CPU_MODEL_MASK	0xF0
+#define CPU_FAMILY_MASK	0xF00
+	unsigned long mpc_featureflag;	/* CPUID feature value */
+	unsigned long mpc_reserved[2];
+};
+
+struct mpc_config_bus
+{
+	unsigned char mpc_type;
+	unsigned char mpc_busid;
+	unsigned char mpc_bustype[6] __attribute__((packed));
+};
+
+/* List of Bus Type string values, Intel MP Spec. */
+#define BUSTYPE_EISA	"EISA"
+#define BUSTYPE_ISA	"ISA"
+#define BUSTYPE_INTERN	"INTERN"	/* Internal BUS */
+#define BUSTYPE_MCA	"MCA"
+#define BUSTYPE_VL	"VL"		/* Local bus */
+#define BUSTYPE_PCI	"PCI"
+#define BUSTYPE_PCMCIA	"PCMCIA"
+#define BUSTYPE_CBUS	"CBUS"
+#define BUSTYPE_CBUSII	"CBUSII"
+#define BUSTYPE_FUTURE	"FUTURE"
+#define BUSTYPE_MBI	"MBI"
+#define BUSTYPE_MBII	"MBII"
+#define BUSTYPE_MPI	"MPI"
+#define BUSTYPE_MPSA	"MPSA"
+#define BUSTYPE_NUBUS	"NUBUS"
+#define BUSTYPE_TC	"TC"
+#define BUSTYPE_VME	"VME"
+#define BUSTYPE_XPRESS	"XPRESS"
+#define BUSTYPE_NEC98	"NEC98"
+
+struct mpc_config_ioapic
+{
+	unsigned char mpc_type;
+	unsigned char mpc_apicid;
+	unsigned char mpc_apicver;
+	unsigned char mpc_flags;
+#define MPC_APIC_USABLE		0x01
+	unsigned long mpc_apicaddr;
+};
+
+struct mpc_config_intsrc
+{
+	unsigned char mpc_type;
+	unsigned char mpc_irqtype;
+	unsigned short mpc_irqflag;
+	unsigned char mpc_srcbus;
+	unsigned char mpc_srcbusirq;
+	unsigned char mpc_dstapic;
+	unsigned char mpc_dstirq;
+};
+
+enum mp_irq_source_types {
+	mp_INT = 0,
+	mp_NMI = 1,
+	mp_SMI = 2,
+	mp_ExtINT = 3
+};
+
+#define MP_IRQDIR_DEFAULT	0
+#define MP_IRQDIR_HIGH		1
+#define MP_IRQDIR_LOW		3
+
+
+struct mpc_config_lintsrc
+{
+	unsigned char mpc_type;
+	unsigned char mpc_irqtype;
+	unsigned short mpc_irqflag;
+	unsigned char mpc_srcbusid;
+	unsigned char mpc_srcbusirq;
+	unsigned char mpc_destapic;	
+#define MP_APIC_ALL	0xFF
+	unsigned char mpc_destapiclint;
+};
+
+struct mp_config_oemtable
+{
+	char oem_signature[4];
+#define MPC_OEM_SIGNATURE "_OEM"
+	unsigned short oem_length;	/* Size of table */
+	char  oem_rev;			/* 0x01 */
+	char  oem_checksum;
+	char  mpc_oem[8];
+};
+
+struct mpc_config_translation
+{
+        unsigned char mpc_type;
+        unsigned char trans_len;
+        unsigned char trans_type;
+        unsigned char trans_quad;
+        unsigned char trans_global;
+        unsigned char trans_local;
+        unsigned short trans_reserved;
+};
+
+/*
+ *	Default configurations
+ *
+ *	1	2 CPU ISA 82489DX
+ *	2	2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
+ *	3	2 CPU EISA 82489DX
+ *	4	2 CPU MCA 82489DX
+ *	5	2 CPU ISA+PCI
+ *	6	2 CPU EISA+PCI
+ *	7	2 CPU MCA+PCI
+ */
+
+enum mp_bustype {
+	MP_BUS_ISA = 1,
+	MP_BUS_EISA,
+	MP_BUS_PCI,
+	MP_BUS_MCA,
+	MP_BUS_NEC98
+};
+#endif
+
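The floating pointer structure is located by scanning low memory for SMP_MAGIC_IDENT on 16-byte boundaries and checking that all bytes of the structure sum to zero modulo 256 (mpf_checksum is chosen to make that true). A hedged sketch of just the validation step; the scan ranges and the exact field offsets are simplified here:

#include <stdio.h>

/* Validate an MP floating pointer candidate: "_MP_" signature plus a
 * byte checksum of zero over mpf_length 16-byte paragraphs. */
static int mpf_checksum_ok(const unsigned char *p, unsigned int paragraphs)
{
	unsigned char sum = 0;

	if (!(p[0] == '_' && p[1] == 'M' && p[2] == 'P' && p[3] == '_'))
		return 0;
	for (unsigned int i = 0; i < paragraphs * 16; i++)
		sum += p[i];
	return sum == 0;	/* mpf_checksum makes the total wrap to 0 */
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_' };
	unsigned char sum = 0;

	for (int i = 0; i < 15; i++)
		sum += mpf[i];
	mpf[15] = (unsigned char)-sum;	/* stand-in slot for the checksum byte */
	printf("valid: %d\n", mpf_checksum_ok(mpf, 1));
	return 0;
}
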
diff --git a/include/asm-i386/msgbuf.h b/include/asm-i386/msgbuf.h
new file mode 100644
index 0000000..b8d659c
--- /dev/null
+++ b/include/asm-i386/msgbuf.h
@@ -0,0 +1,31 @@
+#ifndef _I386_MSGBUF_H
+#define _I386_MSGBUF_H
+
+/* 
+ * The msqid64_ds structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	unsigned long	__unused1;
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	unsigned long	__unused2;
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long	__unused3;
+	unsigned long  msg_cbytes;	/* current number of bytes on queue */
+	unsigned long  msg_qnum;	/* number of messages in queue */
+	unsigned long  msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+#endif /* _I386_MSGBUF_H */
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h
new file mode 100644
index 0000000..b853930
--- /dev/null
+++ b/include/asm-i386/msi.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2003-2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#ifndef ASM_MSI_H
+#define ASM_MSI_H
+
+#include <asm/desc.h>
+#include <mach_apic.h>
+
+#define LAST_DEVICE_VECTOR		232
+#define MSI_DEST_MODE			MSI_LOGICAL_MODE
+#define MSI_TARGET_CPU_SHIFT		12
+
+#ifdef CONFIG_SMP
+#define MSI_TARGET_CPU		logical_smp_processor_id()
+#else
+#define MSI_TARGET_CPU	cpu_to_logical_apicid(first_cpu(cpu_online_map))
+#endif
+
+#endif /* ASM_MSI_H */
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
new file mode 100644
index 0000000..c76fce8
--- /dev/null
+++ b/include/asm-i386/msr.h
@@ -0,0 +1,272 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+/*
+ * Access to model-specific registers (available on 586 and better only).
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), which allows gcc to optimize better.
+ */
+
+#define rdmsr(msr,val1,val2) \
+	__asm__ __volatile__("rdmsr" \
+			  : "=a" (val1), "=d" (val2) \
+			  : "c" (msr))
+
+#define wrmsr(msr,val1,val2) \
+	__asm__ __volatile__("wrmsr" \
+			  : /* no outputs */ \
+			  : "c" (msr), "a" (val1), "d" (val2))
+
+#define rdmsrl(msr,val) do { \
+	unsigned long l__,h__; \
+	rdmsr (msr, l__, h__);  \
+	val = l__;  \
+	val |= ((u64)h__<<32);  \
+} while(0)
+
+static inline void wrmsrl (unsigned long msr, unsigned long long val)
+{
+	unsigned long lo, hi;
+	lo = (unsigned long) val;
+	hi = val >> 32;
+	wrmsr (msr, lo, hi);
+}
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;						\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"						\
+		     "1:\n\t"								\
+		     ".section .fixup,\"ax\"\n\t"					\
+		     "3:  movl %4,%0 ; jmp 1b\n\t"					\
+		     ".previous\n\t"							\
+ 		     ".section __ex_table,\"a\"\n"					\
+		     "   .align 4\n\t"							\
+		     "   .long 	2b,3b\n\t"						\
+		     ".previous"							\
+		     : "=a" (ret__)							\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+	ret__; })
+
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+     __asm__ __volatile__("rdpmc" \
+			  : "=a" (low), "=d" (high) \
+			  : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR		0
+#define MSR_IA32_P5_MC_TYPE		1
+#define MSR_IA32_PLATFORM_ID		0x17
+#define MSR_IA32_EBL_CR_POWERON		0x2a
+
+#define MSR_IA32_APICBASE		0x1b
+#define MSR_IA32_APICBASE_BSP		(1<<8)
+#define MSR_IA32_APICBASE_ENABLE	(1<<11)
+#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE		0x79
+#define MSR_IA32_UCODE_REV		0x8b
+
+#define MSR_P6_PERFCTR0		0xc1
+#define MSR_P6_PERFCTR1		0xc2
+
+#define MSR_IA32_BBL_CR_CTL		0x119
+
+#define MSR_IA32_SYSENTER_CS		0x174
+#define MSR_IA32_SYSENTER_ESP		0x175
+#define MSR_IA32_SYSENTER_EIP		0x176
+
+#define MSR_IA32_MCG_CAP		0x179
+#define MSR_IA32_MCG_STATUS		0x17a
+#define MSR_IA32_MCG_CTL		0x17b
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX		0x180
+#define MSR_IA32_MCG_EBX		0x181
+#define MSR_IA32_MCG_ECX		0x182
+#define MSR_IA32_MCG_EDX		0x183
+#define MSR_IA32_MCG_ESI		0x184
+#define MSR_IA32_MCG_EDI		0x185
+#define MSR_IA32_MCG_EBP		0x186
+#define MSR_IA32_MCG_ESP		0x187
+#define MSR_IA32_MCG_EFLAGS		0x188
+#define MSR_IA32_MCG_EIP		0x189
+#define MSR_IA32_MCG_RESERVED		0x18A
+
+#define MSR_P6_EVNTSEL0			0x186
+#define MSR_P6_EVNTSEL1			0x187
+
+#define MSR_IA32_PERF_STATUS		0x198
+#define MSR_IA32_PERF_CTL		0x199
+
+#define MSR_IA32_THERM_CONTROL		0x19a
+#define MSR_IA32_THERM_INTERRUPT	0x19b
+#define MSR_IA32_THERM_STATUS		0x19c
+#define MSR_IA32_MISC_ENABLE		0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR		0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP	0x1db
+#define MSR_IA32_LASTBRANCHTOIP		0x1dc
+#define MSR_IA32_LASTINTFROMIP		0x1dd
+#define MSR_IA32_LASTINTTOIP		0x1de
+
+#define MSR_IA32_MC0_CTL		0x400
+#define MSR_IA32_MC0_STATUS		0x401
+#define MSR_IA32_MC0_ADDR		0x402
+#define MSR_IA32_MC0_MISC		0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 		0x300
+#define MSR_P4_BPU_PERFCTR1 		0x301
+#define MSR_P4_BPU_PERFCTR2 		0x302
+#define MSR_P4_BPU_PERFCTR3 		0x303
+#define MSR_P4_MS_PERFCTR0 		0x304
+#define MSR_P4_MS_PERFCTR1 		0x305
+#define MSR_P4_MS_PERFCTR2 		0x306
+#define MSR_P4_MS_PERFCTR3 		0x307
+#define MSR_P4_FLAME_PERFCTR0 		0x308
+#define MSR_P4_FLAME_PERFCTR1 		0x309
+#define MSR_P4_FLAME_PERFCTR2 		0x30a
+#define MSR_P4_FLAME_PERFCTR3 		0x30b
+#define MSR_P4_IQ_PERFCTR0 		0x30c
+#define MSR_P4_IQ_PERFCTR1 		0x30d
+#define MSR_P4_IQ_PERFCTR2 		0x30e
+#define MSR_P4_IQ_PERFCTR3 		0x30f
+#define MSR_P4_IQ_PERFCTR4 		0x310
+#define MSR_P4_IQ_PERFCTR5 		0x311
+#define MSR_P4_BPU_CCCR0 		0x360
+#define MSR_P4_BPU_CCCR1 		0x361
+#define MSR_P4_BPU_CCCR2 		0x362
+#define MSR_P4_BPU_CCCR3 		0x363
+#define MSR_P4_MS_CCCR0 		0x364
+#define MSR_P4_MS_CCCR1 		0x365
+#define MSR_P4_MS_CCCR2 		0x366
+#define MSR_P4_MS_CCCR3 		0x367
+#define MSR_P4_FLAME_CCCR0 		0x368
+#define MSR_P4_FLAME_CCCR1 		0x369
+#define MSR_P4_FLAME_CCCR2 		0x36a
+#define MSR_P4_FLAME_CCCR3 		0x36b
+#define MSR_P4_IQ_CCCR0 		0x36c
+#define MSR_P4_IQ_CCCR1 		0x36d
+#define MSR_P4_IQ_CCCR2 		0x36e
+#define MSR_P4_IQ_CCCR3 		0x36f
+#define MSR_P4_IQ_CCCR4 		0x370
+#define MSR_P4_IQ_CCCR5 		0x371
+#define MSR_P4_ALF_ESCR0 		0x3ca
+#define MSR_P4_ALF_ESCR1 		0x3cb
+#define MSR_P4_BPU_ESCR0 		0x3b2
+#define MSR_P4_BPU_ESCR1 		0x3b3
+#define MSR_P4_BSU_ESCR0 		0x3a0
+#define MSR_P4_BSU_ESCR1 		0x3a1
+#define MSR_P4_CRU_ESCR0 		0x3b8
+#define MSR_P4_CRU_ESCR1 		0x3b9
+#define MSR_P4_CRU_ESCR2 		0x3cc
+#define MSR_P4_CRU_ESCR3 		0x3cd
+#define MSR_P4_CRU_ESCR4 		0x3e0
+#define MSR_P4_CRU_ESCR5 		0x3e1
+#define MSR_P4_DAC_ESCR0 		0x3a8
+#define MSR_P4_DAC_ESCR1 		0x3a9
+#define MSR_P4_FIRM_ESCR0 		0x3a4
+#define MSR_P4_FIRM_ESCR1 		0x3a5
+#define MSR_P4_FLAME_ESCR0 		0x3a6
+#define MSR_P4_FLAME_ESCR1 		0x3a7
+#define MSR_P4_FSB_ESCR0 		0x3a2
+#define MSR_P4_FSB_ESCR1 		0x3a3
+#define MSR_P4_IQ_ESCR0 		0x3ba
+#define MSR_P4_IQ_ESCR1 		0x3bb
+#define MSR_P4_IS_ESCR0 		0x3b4
+#define MSR_P4_IS_ESCR1 		0x3b5
+#define MSR_P4_ITLB_ESCR0 		0x3b6
+#define MSR_P4_ITLB_ESCR1 		0x3b7
+#define MSR_P4_IX_ESCR0 		0x3c8
+#define MSR_P4_IX_ESCR1 		0x3c9
+#define MSR_P4_MOB_ESCR0 		0x3aa
+#define MSR_P4_MOB_ESCR1 		0x3ab
+#define MSR_P4_MS_ESCR0 		0x3c0
+#define MSR_P4_MS_ESCR1 		0x3c1
+#define MSR_P4_PMH_ESCR0 		0x3ac
+#define MSR_P4_PMH_ESCR1 		0x3ad
+#define MSR_P4_RAT_ESCR0 		0x3bc
+#define MSR_P4_RAT_ESCR1 		0x3bd
+#define MSR_P4_SAAT_ESCR0 		0x3ae
+#define MSR_P4_SAAT_ESCR1 		0x3af
+#define MSR_P4_SSU_ESCR0 		0x3be
+#define MSR_P4_SSU_ESCR1 		0x3bf    /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0 		0x3c2
+#define MSR_P4_TBPU_ESCR1 		0x3c3
+#define MSR_P4_TC_ESCR0 		0x3c4
+#define MSR_P4_TC_ESCR1 		0x3c5
+#define MSR_P4_U2L_ESCR0 		0x3b0
+#define MSR_P4_U2L_ESCR1 		0x3b1
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER			0xC0000080
+#define MSR_K6_STAR			0xC0000081
+#define MSR_K6_WHCR			0xC0000082
+#define MSR_K6_UWCCR			0xC0000085
+#define MSR_K6_EPMR			0xC0000086
+#define MSR_K6_PSOR			0xC0000087
+#define MSR_K6_PFIR			0xC0000088
+
+#define MSR_K7_EVNTSEL0			0xC0010000
+#define MSR_K7_EVNTSEL1			0xC0010001
+#define MSR_K7_EVNTSEL2			0xC0010002
+#define MSR_K7_EVNTSEL3			0xC0010003
+#define MSR_K7_PERFCTR0			0xC0010004
+#define MSR_K7_PERFCTR1			0xC0010005
+#define MSR_K7_PERFCTR2			0xC0010006
+#define MSR_K7_PERFCTR3			0xC0010007
+#define MSR_K7_HWCR			0xC0010015
+#define MSR_K7_CLK_CTL			0xC001001b
+#define MSR_K7_FID_VID_CTL		0xC0010041
+#define MSR_K7_FID_VID_STATUS		0xC0010042
+
+/* extended feature register */
+#define MSR_EFER 			0xc0000080
+
+/* EFER bits: */
+
+/* Execute Disable enable */
+#define _EFER_NX			11
+#define EFER_NX				(1<<_EFER_NX)
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1			0x107
+#define MSR_IDT_FCR2			0x108
+#define MSR_IDT_FCR3			0x109
+#define MSR_IDT_FCR4			0x10a
+
+#define MSR_IDT_MCR0			0x110
+#define MSR_IDT_MCR1			0x111
+#define MSR_IDT_MCR2			0x112
+#define MSR_IDT_MCR3			0x113
+#define MSR_IDT_MCR4			0x114
+#define MSR_IDT_MCR5			0x115
+#define MSR_IDT_MCR6			0x116
+#define MSR_IDT_MCR7			0x117
+#define MSR_IDT_MCR_CTRL		0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR			0x1107
+#define MSR_VIA_LONGHAUL		0x110a
+#define MSR_VIA_RNG			0x110b
+#define MSR_VIA_BCR2			0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL		0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS		0x80868011
+#define MSR_TMTA_LRTI_READOUT		0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ		0x8086801a
+
+#endif /* __ASM_MSR_H */
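
rdmsr/wrmsr split each 64-bit MSR across EDX:EAX, and rdmsrl() reassembles the two halves. From userspace the same registers can be read through the msr character device (assuming CONFIG_X86_MSR is enabled and you are root), which is a convenient way to inspect the constants above, e.g. MSR_IA32_APICBASE:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_APICBASE 0x1b

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_APICBASE) != sizeof(val)) {
		perror("msr");	/* no msr driver, or not root */
		return 1;
	}
	/* the same field decomposition the APICBASE #defines describe */
	printf("APICBASE: base=%#llx BSP=%d enabled=%d\n",
	       (unsigned long long)(val & (0xfffffULL << 12)),
	       (int)((val >> 8) & 1), (int)((val >> 11) & 1));
	close(fd);
	return 0;
}
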
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
new file mode 100644
index 0000000..5b6ceda
--- /dev/null
+++ b/include/asm-i386/mtrr.h
@@ -0,0 +1,107 @@
+/*  Generic MTRR (Memory Type Range Register) ioctls.
+
+    Copyright (C) 1997-1999  Richard Gooch
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free
+    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
+    The postal address is:
+      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+*/
+#ifndef _LINUX_MTRR_H
+#define _LINUX_MTRR_H
+
+#include <linux/config.h>
+#include <linux/ioctl.h>
+
+#define	MTRR_IOCTL_BASE	'M'
+
+struct mtrr_sentry
+{
+    unsigned long base;    /*  Base address     */
+    unsigned int size;    /*  Size of region   */
+    unsigned int type;     /*  Type of region   */
+};
+
+struct mtrr_gentry
+{
+    unsigned int regnum;   /*  Register number  */
+    unsigned long base;    /*  Base address     */
+    unsigned int size;    /*  Size of region   */
+    unsigned int type;     /*  Type of region   */
+};
+
+/*  These are the various ioctls  */
+#define MTRRIOC_ADD_ENTRY        _IOW(MTRR_IOCTL_BASE,  0, struct mtrr_sentry)
+#define MTRRIOC_SET_ENTRY        _IOW(MTRR_IOCTL_BASE,  1, struct mtrr_sentry)
+#define MTRRIOC_DEL_ENTRY        _IOW(MTRR_IOCTL_BASE,  2, struct mtrr_sentry)
+#define MTRRIOC_GET_ENTRY        _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
+#define MTRRIOC_KILL_ENTRY       _IOW(MTRR_IOCTL_BASE,  4, struct mtrr_sentry)
+#define MTRRIOC_ADD_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  5, struct mtrr_sentry)
+#define MTRRIOC_SET_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  6, struct mtrr_sentry)
+#define MTRRIOC_DEL_PAGE_ENTRY   _IOW(MTRR_IOCTL_BASE,  7, struct mtrr_sentry)
+#define MTRRIOC_GET_PAGE_ENTRY   _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
+#define MTRRIOC_KILL_PAGE_ENTRY  _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry)
+
+/*  These are the region types  */
+#define MTRR_TYPE_UNCACHABLE 0
+#define MTRR_TYPE_WRCOMB     1
+/*#define MTRR_TYPE_         2*/
+/*#define MTRR_TYPE_         3*/
+#define MTRR_TYPE_WRTHROUGH  4
+#define MTRR_TYPE_WRPROT     5
+#define MTRR_TYPE_WRBACK     6
+#define MTRR_NUM_TYPES       7
+
+#ifdef __KERNEL__
+
+/*  The following functions are for use by other drivers  */
+# ifdef CONFIG_MTRR
+extern int mtrr_add (unsigned long base, unsigned long size,
+		     unsigned int type, char increment);
+extern int mtrr_add_page (unsigned long base, unsigned long size,
+		     unsigned int type, char increment);
+extern int mtrr_del (int reg, unsigned long base, unsigned long size);
+extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
+extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
+#  else
+static __inline__ int mtrr_add (unsigned long base, unsigned long size,
+				unsigned int type, char increment)
+{
+    return -ENODEV;
+}
+static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
+				unsigned int type, char increment)
+{
+    return -ENODEV;
+}
+static __inline__ int mtrr_del (int reg, unsigned long base,
+				unsigned long size)
+{
+    return -ENODEV;
+}
+static __inline__ int mtrr_del_page (int reg, unsigned long base,
+				unsigned long size)
+{
+    return -ENODEV;
+}
+
+static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+
+#  endif
+
+#endif
+
+#endif  /*  _LINUX_MTRR_H  */
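
The usual in-kernel use of this API is a framebuffer driver requesting write-combining for its aperture. A hedged kernel-context sketch with a hypothetical base address and size; increment=1 asks the core to refcount a matching region rather than fail if one already exists:

/* Hedged sketch, not a complete driver. */
#define FB_BASE 0xf8000000UL	/* hypothetical 4MB framebuffer aperture */
#define FB_SIZE 0x400000UL

static int fb_mtrr = -1;

static void fb_enable_wc(void)
{
	fb_mtrr = mtrr_add(FB_BASE, FB_SIZE, MTRR_TYPE_WRCOMB, 1);
	/* a negative return (no free register, or MTRRs not supported)
	 * just means the framebuffer runs uncached -- not fatal */
}

static void fb_disable_wc(void)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, FB_BASE, FB_SIZE);
}
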
diff --git a/include/asm-i386/namei.h b/include/asm-i386/namei.h
new file mode 100644
index 0000000..8148650
--- /dev/null
+++ b/include/asm-i386/namei.h
@@ -0,0 +1,17 @@
+/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
+ * linux/include/asm-i386/namei.h
+ *
+ * Included from linux/fs/namei.c
+ */
+
+#ifndef __I386_NAMEI_H
+#define __I386_NAMEI_H
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __I386_NAMEI_H */
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
new file mode 100644
index 0000000..21f1663
--- /dev/null
+++ b/include/asm-i386/nmi.h
@@ -0,0 +1,28 @@
+/*
+ *  linux/include/asm-i386/nmi.h
+ */
+#ifndef ASM_NMI_H
+#define ASM_NMI_H
+
+#include <linux/pm.h>
+ 
+struct pt_regs;
+ 
+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
+ 
+/** 
+ * set_nmi_callback
+ *
+ * Set a handler for an NMI. Only one handler may be
+ * set at a time. The handler should return 1 if it handled the NMI.
+ */
+void set_nmi_callback(nmi_callback_t callback);
+ 
+/** 
+ * unset_nmi_callback
+ *
+ * Remove the handler previously set.
+ */
+void unset_nmi_callback(void);
+ 
+#endif /* ASM_NMI_H */
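
Since there is only one callback slot, a user of this API installs its handler on init and removes it on exit. A hedged sketch; my_touch_watchdog() is a hypothetical helper, and returning 1 tells the NMI path the event was consumed:

/* Hedged kernel-context sketch. */
static int my_nmi_handler(struct pt_regs *regs, int cpu)
{
	my_touch_watchdog(cpu);		/* hypothetical helper */
	return 1;			/* 1 = handled, stop processing */
}

static void my_init(void)
{
	set_nmi_callback(my_nmi_handler);	/* single slot: last caller wins */
}

static void my_exit(void)
{
	unset_nmi_callback();
}
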
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h
new file mode 100644
index 0000000..e13c6ff
--- /dev/null
+++ b/include/asm-i386/node.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_I386_NODE_H_
+#define _ASM_I386_NODE_H_
+
+#include <linux/device.h>
+#include <linux/mmzone.h>
+#include <linux/node.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+
+struct i386_node {
+	struct node node;
+};
+extern struct i386_node node_devices[MAX_NUMNODES];
+
+static inline int arch_register_node(int num)
+{
+	int p_node;
+	struct node *parent = NULL;
+
+	if (!node_online(num))
+		return 0;
+	p_node = parent_node(num);
+
+	if (p_node != num)
+		parent = &node_devices[p_node].node;
+
+	return register_node(&node_devices[num].node, num, parent);
+}
+
+#endif /* _ASM_I386_NODE_H_ */
diff --git a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h
new file mode 100644
index 0000000..38f710d
--- /dev/null
+++ b/include/asm-i386/numaq.h
@@ -0,0 +1,164 @@
+/*
+ * Written by: Patricia Gaughen, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.          
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gone@us.ibm.com>
+ */
+
+#ifndef NUMAQ_H
+#define NUMAQ_H
+
+#ifdef CONFIG_X86_NUMAQ
+
+extern int get_memcfg_numaq(void);
+
+/*
+ * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data describe
+ * the system configuration data left in each quad's private space.
+ */
+#define SYS_CFG_DATA_PRIV_ADDR		0x0009d000 /* place for scd in private quad space */
+
+/*
+ * Communication area for each processor on lynxer-processor tests.
+ *
+ * NOTE: If you change the size of this eachproc structure you need
+ *       to change the definition for EACH_QUAD_SIZE.
+ */
+struct eachquadmem {
+	unsigned int	priv_mem_start;		/* Starting address of this */
+						/* quad's private memory. */
+						/* This is always 0. */
+						/* In MB. */
+	unsigned int	priv_mem_size;		/* Size of this quad's */
+						/* private memory. */
+						/* In MB. */
+	unsigned int	low_shrd_mem_strp_start;/* Starting address of this */
+						/* quad's low shared block */
+						/* (untranslated). */
+						/* In MB. */
+	unsigned int	low_shrd_mem_start;	/* Starting address of this */
+						/* quad's low shared memory */
+						/* (untranslated). */
+						/* In MB. */
+	unsigned int	low_shrd_mem_size;	/* Size of this quad's low */
+						/* shared memory. */
+						/* In MB. */
+	unsigned int	lmmio_copb_start;	/* Starting address of this */
+						/* quad's local memory */
+						/* mapped I/O in the */
+						/* compatibility OPB. */
+						/* In MB. */
+	unsigned int	lmmio_copb_size;	/* Size of this quad's local */
+						/* memory mapped I/O in the */
+						/* compatibility OPB. */
+						/* In MB. */
+	unsigned int	lmmio_nopb_start;	/* Starting address of this */
+						/* quad's local memory */
+						/* mapped I/O in the */
+						/* non-compatibility OPB. */
+						/* In MB. */
+	unsigned int	lmmio_nopb_size;	/* Size of this quad's local */
+						/* memory mapped I/O in the */
+						/* non-compatibility OPB. */
+						/* In MB. */
+	unsigned int	io_apic_0_start;	/* Starting address of I/O */
+						/* APIC 0. */
+	unsigned int	io_apic_0_sz;		/* Size I/O APIC 0. */
+	unsigned int	io_apic_1_start;	/* Starting address of I/O */
+						/* APIC 1. */
+	unsigned int	io_apic_1_sz;		/* Size I/O APIC 1. */
+	unsigned int	hi_shrd_mem_start;	/* Starting address of this */
+						/* quad's high shared memory.*/
+						/* In MB. */
+	unsigned int	hi_shrd_mem_size;	/* Size of this quad's high */
+						/* shared memory. */
+						/* In MB. */
+	unsigned int	mps_table_addr;		/* Address of this quad's */
+						/* MPS tables from BIOS, */
+						/* in system space.*/
+	unsigned int	lcl_MDC_pio_addr;	/* Port-I/O address for */
+						/* local access of MDC. */
+	unsigned int	rmt_MDC_mmpio_addr;	/* MM-Port-I/O address for */
+						/* remote access of MDC. */
+	unsigned int	mm_port_io_start;	/* Starting address of this */
+						/* quad's memory mapped Port */
+						/* I/O space. */
+	unsigned int	mm_port_io_size;	/* Size of this quad's memory*/
+						/* mapped Port I/O space. */
+	unsigned int	mm_rmt_io_apic_start;	/* Starting address of this */
+						/* quad's memory mapped */
+						/* remote I/O APIC space. */
+	unsigned int	mm_rmt_io_apic_size;	/* Size of this quad's memory*/
+						/* mapped remote I/O APIC */
+						/* space. */
+	unsigned int	mm_isa_start;		/* Starting address of this */
+						/* quad's memory mapped ISA */
+						/* space (contains MDC */
+						/* memory space). */
+	unsigned int	mm_isa_size;		/* Size of this quad's memory*/
+						/* mapped ISA space (contains*/
+						/* MDC memory space). */
+	unsigned int	rmt_qmi_addr;		/* Remote addr to access QMI.*/
+	unsigned int	lcl_qmi_addr;		/* Local addr to access QMI. */
+};
+
+/*
+ * Note: This structure must NOT be changed unless the multiproc and
+ * OS are changed to reflect the new structure.
+ */
+struct sys_cfg_data {
+	unsigned int	quad_id;
+	unsigned int	bsp_proc_id; /* Boot Strap Processor in this quad. */
+	unsigned int	scd_version; /* Version number of this table. */
+	unsigned int	first_quad_id;
+	unsigned int	quads_present31_0; /* 1 bit for each quad */
+	unsigned int	quads_present63_32; /* 1 bit for each quad */
+	unsigned int	config_flags;
+	unsigned int	boot_flags;
+	unsigned int	csr_start_addr; /* Absolute value (not in MB) */
+	unsigned int	csr_size; /* Absolute value (not in MB) */
+	unsigned int	lcl_apic_start_addr; /* Absolute value (not in MB) */
+	unsigned int	lcl_apic_size; /* Absolute value (not in MB) */
+	unsigned int	low_shrd_mem_base; /* 0 or 512MB or 1GB */
+	unsigned int	low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
+					/* may not be totally populated */
+	unsigned int	split_mem_enbl; /* 0 for no low shared memory */ 
+	unsigned int	mmio_sz; /* Size of total system memory mapped I/O */
+				 /* (in MB). */
+	unsigned int	quad_spin_lock; /* Spare location used for quad */
+					/* bringup. */
+	unsigned int	nonzero55; /* For checksumming. */
+	unsigned int	nonzeroaa; /* For checksumming. */
+	unsigned int	scd_magic_number;
+	unsigned int	system_type;
+	unsigned int	checksum;
+	/*
+	 *	memory configuration area for each quad
+	 */
+        struct	eachquadmem eq[MAX_NUMNODES];	/* indexed by quad id */
+};
+
+static inline unsigned long *get_zholes_size(int nid)
+{
+	return NULL;
+}
+#endif /* CONFIG_X86_NUMAQ */
+#endif /* NUMAQ_H */
+
diff --git a/include/asm-i386/numnodes.h b/include/asm-i386/numnodes.h
new file mode 100644
index 0000000..a61f38c
--- /dev/null
+++ b/include/asm-i386/numnodes.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_MAX_NUMNODES_H
+#define _ASM_MAX_NUMNODES_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_X86_NUMAQ
+
+/* Max 16 Nodes */
+#define NODES_SHIFT	4
+
+#elif defined(CONFIG_ACPI_SRAT)
+
+/* Max 8 Nodes */
+#define NODES_SHIFT	3
+
+#endif /* CONFIG_X86_NUMAQ */
+
+#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
new file mode 100644
index 0000000..ed13969
--- /dev/null
+++ b/include/asm-i386/page.h
@@ -0,0 +1,153 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page)	mmx_clear_page((void *)(page))
+#define copy_page(to,from)	mmx_copy_page(to,from)
+
+#else
+
+/*
+ *	On older X86 processors it seems it's not a win to use MMX here.
+ *	Maybe the K6-III?
+ */
+ 
+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+extern int nx_enabled;
+#ifdef CONFIG_X86_PAE
+extern unsigned long long __supported_pte_mask;
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pmd_val(x)	((x).pmd)
+#define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pmd(x) ((pmd_t) { (x) } )
+#define HPAGE_SHIFT	21
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define boot_pte_t pte_t /* or would you rather have a typedef */
+#define pte_val(x)	((x).pte_low)
+#define HPAGE_SHIFT	22
+#endif
+#define PTE_MASK	PAGE_MASK
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB. 
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+extern int sysctl_legacy_va_layout;
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET		(0xC0000000)
+#else
+#define __PAGE_OFFSET		(0xC0000000UL)
+#endif
+
+
+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
+#define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn)	(mem_map + (pfn))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS \
+	(VM_READ | VM_WRITE | \
+	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
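
get_order() above rounds a byte count up to the smallest power-of-two number of pages; the initial shift by PAGE_SHIFT-1 is what makes a size of exactly one page come out as order 0. The same routine, runnable standalone:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Same pure-2^n algorithm as asm-i386/page.h */
static int get_order(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 1..4096 bytes -> order 0, 4097..8192 -> order 1, 64KB -> order 4 */
	printf("%d %d %d %d\n", get_order(1), get_order(4096),
	       get_order(4097), get_order(65536));
	return 0;
}
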
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
new file mode 100644
index 0000000..b644052
--- /dev/null
+++ b/include/asm-i386/param.h
@@ -0,0 +1,23 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ		1000		/* Internal kernel timer frequency */
+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE	4096
+
+#ifndef NOGROUP
+#define NOGROUP		(-1)
+#endif
+
+#define MAXHOSTNAMELEN	64	/* max length of hostname */
+#define COMMAND_LINE_SIZE 256
+
+#endif
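
The HZ/USER_HZ split lets the kernel tick at 1000Hz internally while interfaces such as times() keep reporting the traditional 100 ticks per second. A sketch of the conversion performed when exporting jiffies-based values to userspace:

#include <stdio.h>

#define HZ      1000	/* internal tick rate */
#define USER_HZ  100	/* rate exposed by times() etc. */

int main(void)
{
	unsigned long jiffies = 2500;	/* 2.5s of internal ticks */

	/* what the kernel reports to userspace as clock_t */
	printf("%lu jiffies -> %lu user ticks\n",
	       jiffies, jiffies / (HZ / USER_HZ));
	return 0;
}
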
diff --git a/include/asm-i386/parport.h b/include/asm-i386/parport.h
new file mode 100644
index 0000000..fa0e321
--- /dev/null
+++ b/include/asm-i386/parport.h
@@ -0,0 +1,18 @@
+/*
+ * parport.h: ia32-specific parport initialisation
+ *
+ * Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_I386_PARPORT_H
+#define _ASM_I386_PARPORT_H 1
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+	return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASM_I386_PARPORT_H) */
diff --git a/include/asm-i386/pci-direct.h b/include/asm-i386/pci-direct.h
new file mode 100644
index 0000000..4f6738b
--- /dev/null
+++ b/include/asm-i386/pci-direct.h
@@ -0,0 +1 @@
+#include "asm-x86_64/pci-direct.h"
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
new file mode 100644
index 0000000..fb749b8
--- /dev/null
+++ b/include/asm-i386/pci.h
@@ -0,0 +1,110 @@
+#ifndef __i386_PCI_H
+#define __i386_PCI_H
+
+#include <linux/config.h>
+
+#ifdef __KERNEL__
+#include <linux/mm.h>		/* for struct page */
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+   already-configured bus numbers - to be used for buggy BIOSes
+   or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses()	0
+#endif
+#define pcibios_scan_all_fns(a, b)	0
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		(pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+/* Dynamic DMA mapping stuff.
+ * i386 has everything mapped statically.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The PCI address space does equal the physical memory
+ * address space.  The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS	(1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)		(0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+
+/* This is always fine. */
+#define pci_dac_dma_supported(pci_dev, mask)	(1)
+
+static inline dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+	return ((dma64_addr_t) page_to_phys(page) +
+		(dma64_addr_t) offset);
+}
+
+static inline struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	return pfn_to_page(dma_addr >> PAGE_SHIFT);
+}
+
+static inline unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	return (dma_addr & ~PAGE_MASK);
+}
+
+static inline void
+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+}
+
+static inline void
+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
+{
+	flush_write_buffers();
+}
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			       enum pci_mmap_state mmap_state, int write_combine);
+
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+#endif /* __i386_PCI_H */
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
new file mode 100644
index 0000000..5764afa
--- /dev/null
+++ b/include/asm-i386/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ARCH_I386_PERCPU__
+#define __ARCH_I386_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ARCH_I386_PERCPU__ */
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
new file mode 100644
index 0000000..0380c3d
--- /dev/null
+++ b/include/asm-i386/pgalloc.h
@@ -0,0 +1,50 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+#include <linux/mm.h>		/* for struct page */
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) 				\
+	set_pmd(pmd, __pmd(_PAGE_TABLE +			\
+		((unsigned long long)page_to_pfn(pte) <<	\
+			(unsigned long long) PAGE_SHIFT)))
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct page *pte)
+{
+	__free_page(pte);
+}
+
+
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
+
+#ifdef CONFIG_X86_PAE
+/*
+ * In the PAE case we free the pmds as part of the pgd.
+ */
+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)			do { } while (0)
+#define __pmd_free_tlb(tlb,x)		do { } while (0)
+#define pud_populate(mm, pmd, pte)	BUG()
+#endif
+
+#define check_pgt_cache()	do { } while (0)
+
+#endif /* _I386_PGALLOC_H */
diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h
new file mode 100644
index 0000000..0251807
--- /dev/null
+++ b/include/asm-i386/pgtable-2level-defs.h
@@ -0,0 +1,18 @@
+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+#define _I386_PGTABLE_2LEVEL_DEFS_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT	22
+#define PTRS_PER_PGD	1024
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+
+#define PTRS_PER_PTE	1024
+
+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
new file mode 100644
index 0000000..fa07bd6
--- /dev/null
+++ b/include/asm-i386/pgtable-2level.h
@@ -0,0 +1,69 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
+#define pte_same(a, b)		((a).pte_low == (b).pte_low)
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define pte_none(x)		(!(x).pte_low)
+#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/*
+ * All present user pages are user-executable:
+ */
+static inline int pte_exec(pte_t pte)
+{
+	return pte_user(pte);
+}
+
+/*
+ * All present pages are kernel-executable:
+ */
+static inline int pte_exec_kernel(pte_t pte)
+{
+	return 1;
+}
+
+/*
+ * Bits 0, 6 and 7 are taken, so the 29 bits of file offset are
+ * split across the remaining bit ranges:
+ */
+#define PTE_FILE_MAX_BITS	29
+
+#define pte_to_pgoff(pte) \
+	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+
+#define pgoff_to_pte(off) \
+	((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x)			(((x).val >> 1) & 0x1f)
+#define __swp_offset(x)			((x).val >> 8)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
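
Because bits 0, 6 and 7 of a non-present pte are reserved, the 29-bit file offset is stored as 5 bits at positions 1-5 plus 24 bits at positions 8-31. A standalone round-trip of exactly the pte_to_pgoff()/pgoff_to_pte() arithmetic above:

#include <stdio.h>

#define _PAGE_FILE 0x040

static unsigned long pgoff_to_pte_low(unsigned long off)
{
	return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long pte_low_to_pgoff(unsigned long pte_low)
{
	return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
	unsigned long off = 0x12345678 & ((1UL << 29) - 1);	/* any 29-bit value */
	unsigned long pte = pgoff_to_pte_low(off);

	printf("off %#lx -> pte %#lx -> off %#lx\n",
	       off, pte, pte_low_to_pgoff(pte));
	return 0;
}
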
diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h
new file mode 100644
index 0000000..eb3a1ea
--- /dev/null
+++ b/include/asm-i386/pgtable-3level-defs.h
@@ -0,0 +1,22 @@
+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
+#define _I386_PGTABLE_3LEVEL_DEFS_H
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT	30
+#define PTRS_PER_PGD	4
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT	21
+#define PTRS_PER_PMD	512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE	512
+
+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
new file mode 100644
index 0000000..d609f9c
--- /dev/null
+++ b/include/asm-i386/pgtable-3level.h
@@ -0,0 +1,160 @@
+#ifndef _I386_PGTABLE_3LEVEL_H
+#define _I386_PGTABLE_3LEVEL_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/*
+ * Intel Physical Address Extension (PAE) Mode - three-level page
+ * tables on PPro+ CPUs.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+#define pud_none(pud)				0
+#define pud_bad(pud)				0
+#define pud_present(pud)			1
+
+/*
+ * Is the pte executable?
+ */
+static inline int pte_x(pte_t pte)
+{
+	return !(pte_val(pte) & _PAGE_NX);
+}
+
+/*
+ * All present user-pages with !NX bit are user-executable:
+ */
+static inline int pte_exec(pte_t pte)
+{
+	return pte_user(pte) && pte_x(pte);
+}
+/*
+ * All present pages with !NX bit are kernel-executable:
+ */
+static inline int pte_exec_kernel(pte_t pte)
+{
+	return pte_x(pte);
+}
+
+/* Rules for using set_pte: the pte being assigned *must* be
+ * either not present or in a state where the hardware will
+ * not attempt to update the pte.  In places where this is
+ * not possible, use pte_get_and_clear to obtain the old pte
+ * value and then use set_pte to update it.  -ben
+ */
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+#define __HAVE_ARCH_SET_PTE_ATOMIC
+#define set_pte_atomic(pteptr,pteval) \
+		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
+#define set_pmd(pmdptr,pmdval) \
+		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
+#define set_pud(pudptr,pudval) \
+		set_64bit((unsigned long long *)(pudptr),pud_val(pudval))
+
+/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
+ */
+static inline void pud_clear (pud_t * pud) { }
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#define pud_page(pud) \
+((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+#define pud_page_kernel(pud) \
+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+			pmd_index(address))
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t res;
+
+	/* xchg acts as a barrier before the setting of the high bits */
+	res.pte_low = xchg(&ptep->pte_low, 0);
+	res.pte_high = ptep->pte_high;
+	ptep->pte_high = 0;
+
+	return res;
+}
+
+static inline int pte_same(pte_t a, pte_t b)
+{
+	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
+}
+
+#define pte_page(x)	pfn_to_page(pte_pfn(x))
+
+static inline int pte_none(pte_t pte)
+{
+	return !pte.pte_low && !pte.pte_high;
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte.pte_low >> PAGE_SHIFT) |
+		(pte.pte_high << (32 - PAGE_SHIFT));
+}
+
+extern unsigned long long __supported_pte_mask;
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+	pte_t pte;
+
+	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
+					(pgprot_val(pgprot) >> 32);
+	pte.pte_high &= (__supported_pte_mask >> 32);
+	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
+							__supported_pte_mask;
+	return pte;
+}
+
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
+			pgprot_val(pgprot)) & __supported_pte_mask);
+}
+
+/*
+ * Bits 0, 6 and 7 are taken in the low part of the pte,
+ * put the 32 bits of offset into the high part.
+ */
+#define pte_to_pgoff(pte) ((pte).pte_high)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+#define PTE_FILE_MAX_BITS       32
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x)			(((x).val) & 0x1f)
+#define __swp_offset(x)			((x).val >> 5)
+#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
+#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
+
+#define __pmd_free_tlb(tlb, x)		do { } while (0)
+
+#endif /* _I386_PGTABLE_3LEVEL_H */
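
With PAE the full high word of the pte is free once the low word is zero (the pte is then never "present" to the hardware), so the whole swap entry lives in pte_high: 5 bits of swap type plus up to 27 bits of offset. The packing arithmetic, runnable standalone:

#include <stdio.h>

/* PAE swap-entry packing as in pgtable-3level.h above. */
static unsigned long swp_entry_val(unsigned type, unsigned long offset)
{
	return (type & 0x1f) | (offset << 5);
}

int main(void)
{
	unsigned long val = swp_entry_val(3, 0x123456);

	printf("type %lu, offset %#lx\n", val & 0x1f, val >> 5);
	return 0;
}
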
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
new file mode 100644
index 0000000..488c2b4
--- /dev/null
+++ b/include/asm-i386/pgtable.h
@@ -0,0 +1,422 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern unsigned long empty_zero_page[1024];
+extern pgd_t swapper_pg_dir[1024];
+extern kmem_cache_t *pgd_cache;
+extern kmem_cache_t *pmd_cache;
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pgtable_cache_init(void);
+void paging_init(void);
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level-defs.h>
+# define PMD_SIZE	(1UL << PMD_SHIFT)
+# define PMD_MASK	(~(PMD_SIZE-1))
+#else
+# include <asm/pgtable-2level-defs.h>
+#endif
+
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR	0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT	22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET	(8*1024*1024)
+#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
+			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+/*
+ * The 4MB page is a guess..  Detailed in the infamous "Chapter H"
+ * of the Pentium documentation, but assuming Intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory.
+ */
+#define _PAGE_BIT_PRESENT	0
+#define _PAGE_BIT_RW		1
+#define _PAGE_BIT_USER		2
+#define _PAGE_BIT_PWT		3
+#define _PAGE_BIT_PCD		4
+#define _PAGE_BIT_ACCESSED	5
+#define _PAGE_BIT_DIRTY		6
+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
+#define _PAGE_BIT_UNUSED2	10
+#define _PAGE_BIT_UNUSED3	11
+#define _PAGE_BIT_NX		63
+
+#define _PAGE_PRESENT	0x001
+#define _PAGE_RW	0x002
+#define _PAGE_USER	0x004
+#define _PAGE_PWT	0x008
+#define _PAGE_PCD	0x010
+#define _PAGE_ACCESSED	0x020
+#define _PAGE_DIRTY	0x040
+#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
+#define _PAGE_UNUSED1	0x200	/* available for programmer */
+#define _PAGE_UNUSED2	0x400
+#define _PAGE_UNUSED3	0x800
+
+#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
+#define _PAGE_PROTNONE	0x080	/* If not present */
+#ifdef CONFIG_X86_PAE
+#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
+#else
+#define _PAGE_NX	0
+#endif
+
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE \
+	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED \
+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+#define PAGE_SHARED_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY \
+	PAGE_COPY_NOEXEC
+#define PAGE_READONLY \
+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define _PAGE_KERNEL \
+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_KERNEL_EXEC \
+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
+
+/*
+ * The i386 can't do page protection for execute; it considers execute
+ * permission the same as read. Also, write permissions imply read
+ * permissions. This is the closest we can get..
+ */
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_EXEC
+#define __P101	PAGE_READONLY_EXEC
+#define __P110	PAGE_COPY_EXEC
+#define __P111	PAGE_COPY_EXEC
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_EXEC
+#define __S101	PAGE_READONLY_EXEC
+#define __S110	PAGE_SHARED_EXEC
+#define __S111	PAGE_SHARED_EXEC
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef TEST_VERIFY_AREA
+
+/* The boot page tables (all created as a single array) */
+extern unsigned long pg0[];
+
+#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pmd_none(x)	(!pmd_val(x))
+#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
+static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
+
+static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
+
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (!pte_dirty(*ptep))
+		return 0;
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
+}
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (!pte_young(*ptep))
+		return 0;
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
+ * it, this is a no-op.
+ */
+#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
+				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte.pte_low &= _PAGE_CHG_MASK;
+	pte.pte_low |= pgprot_val(newprot);
+#ifdef CONFIG_X86_PAE
+	/*
+	 * Chop off the NX bit (if present), and add the NX portion of
+	 * the newprot (if present):
+	 */
+	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
+					(__supported_pte_mask >> 32);
+#endif
+	return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_large(pmd) \
+((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index_k(addr) pgd_index(addr)
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) \
+		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address);
+
+/*
+ * Make a given kernel text page executable/non-executable.
+ * Returns the previous executability setting of that page (which
+ * is used to restore the previous state). Used by the SMP bootup code.
+ * NOTE: this is an __init function for security reasons.
+ */
+#ifdef CONFIG_X86_PAE
+ extern int set_kernel_exec(unsigned long vaddr, int enable);
+#else
+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
+#endif
+
+extern void noexec_setup(const char *str);
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#else
+#define pte_offset_map(dir, address) \
+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#endif
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+	do {								  \
+		if (__dirty) {						  \
+			(__ptep)->pte_low = (__entry).pte_low;	  	  \
+			flush_tlb_page(__vma, __address);		  \
+		}							  \
+	} while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr)	(1)
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
+		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _I386_PGTABLE_H */
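
As an aside (illustration only, not part of the patch): the pte_mk*/
pte_*protect helpers above are plain bit twiddling on pte_low. A minimal
user-space sketch of the same _PAGE_* arithmetic, with the constants
copied from this header:

	#include <stdio.h>

	#define _PAGE_PRESENT	0x001
	#define _PAGE_RW	0x002
	#define _PAGE_USER	0x004
	#define _PAGE_ACCESSED	0x020

	int main(void)
	{
		unsigned long pte = _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED;

		pte |= _PAGE_RW;			/* what pte_mkwrite() does */
		printf("writable:  %#lx\n", pte);	/* 0x27 */
		pte &= ~_PAGE_RW;			/* what pte_wrprotect() does */
		printf("read-only: %#lx\n", pte);	/* 0x25 */
		return 0;
	}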
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
new file mode 100644
index 0000000..aecc80a
--- /dev/null
+++ b/include/asm-i386/poll.h
@@ -0,0 +1,26 @@
+#ifndef __i386_POLL_H
+#define __i386_POLL_H
+
+/* These are specified by iBCS2 */
+#define POLLIN		0x0001
+#define POLLPRI		0x0002
+#define POLLOUT		0x0004
+#define POLLERR		0x0008
+#define POLLHUP		0x0010
+#define POLLNVAL	0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM	0x0040
+#define POLLRDBAND	0x0080
+#define POLLWRNORM	0x0100
+#define POLLWRBAND	0x0200
+#define POLLMSG		0x0400
+#define POLLREMOVE	0x1000
+
+struct pollfd {
+	int fd;
+	short events;
+	short revents;
+};
+
+#endif
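
A quick aside: these POLL* values are exactly what user space sees
through <poll.h>. A minimal poll(2) example:

	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
		int n = poll(&pfd, 1, 5000);	/* wait up to 5s for input */

		if (n > 0 && (pfd.revents & POLLIN))
			printf("stdin is readable\n");
		else if (n == 0)
			printf("timed out\n");
		return 0;
	}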
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
new file mode 100644
index 0000000..4e47ed0
--- /dev/null
+++ b/include/asm-i386/posix_types.h
@@ -0,0 +1,82 @@
+#ifndef __ARCH_I386_POSIX_TYPES_H
+#define __ARCH_I386_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long	__kernel_ino_t;
+typedef unsigned short	__kernel_mode_t;
+typedef unsigned short	__kernel_nlink_t;
+typedef long		__kernel_off_t;
+typedef int		__kernel_pid_t;
+typedef unsigned short	__kernel_ipc_pid_t;
+typedef unsigned short	__kernel_uid_t;
+typedef unsigned short	__kernel_gid_t;
+typedef unsigned int	__kernel_size_t;
+typedef int		__kernel_ssize_t;
+typedef int		__kernel_ptrdiff_t;
+typedef long		__kernel_time_t;
+typedef long		__kernel_suseconds_t;
+typedef long		__kernel_clock_t;
+typedef int		__kernel_timer_t;
+typedef int		__kernel_clockid_t;
+typedef int		__kernel_daddr_t;
+typedef char *		__kernel_caddr_t;
+typedef unsigned short	__kernel_uid16_t;
+typedef unsigned short	__kernel_gid16_t;
+typedef unsigned int	__kernel_uid32_t;
+typedef unsigned int	__kernel_gid32_t;
+
+typedef unsigned short	__kernel_old_uid_t;
+typedef unsigned short	__kernel_old_gid_t;
+typedef unsigned short	__kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long	__kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+	int	val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+	int	__val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef	__FD_SET
+#define __FD_SET(fd,fdsetp) \
+		__asm__ __volatile__("btsl %1,%0": \
+			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef	__FD_CLR
+#define __FD_CLR(fd,fdsetp) \
+		__asm__ __volatile__("btrl %1,%0": \
+			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef	__FD_ISSET
+#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+		unsigned char __result; \
+		__asm__ __volatile__("btl %1,%2 ; setb %0" \
+			:"=q" (__result) :"r" ((int) (fd)), \
+			"m" (*(__kernel_fd_set *) (fdsetp))); \
+		__result; }))
+
+#undef	__FD_ZERO
+#define __FD_ZERO(fdsetp) \
+do { \
+	int __d0, __d1; \
+	__asm__ __volatile__("cld ; rep ; stosl" \
+			:"=m" (*(__kernel_fd_set *) (fdsetp)), \
+			  "=&c" (__d0), "=&D" (__d1) \
+			:"a" (0), "1" (__FDSET_LONGS), \
+			"2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
+} while (0)
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
+#endif
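
For reference (illustration, not patch content): the btsl/btrl/btl
instructions index past the addressed word automatically, which is the
same fd/32, fd%32 split a portable C version spells out explicitly:

	#include <stdio.h>

	#define NBITS (8 * sizeof(unsigned long))

	static void fd_set_bit(int fd, unsigned long *set)
	{
		set[fd / NBITS] |= 1UL << (fd % NBITS);		/* ~ __FD_SET */
	}

	static int fd_test_bit(int fd, const unsigned long *set)
	{
		return (set[fd / NBITS] >> (fd % NBITS)) & 1;	/* ~ __FD_ISSET */
	}

	int main(void)
	{
		unsigned long set[32] = { 0 };	/* 1024 bits on i386 */

		fd_set_bit(42, set);
		printf("fd 42 set: %d\n", fd_test_bit(42, set));
		return 0;
	}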
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
new file mode 100644
index 0000000..be258b0
--- /dev/null
+++ b/include/asm-i386/processor.h
@@ -0,0 +1,682 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/percpu.h>
+
+/* flag for disabling the tsc */
+extern int tsc_disable;
+
+struct desc_struct {
+	unsigned long a,b;
+};
+
+#define desc_empty(desc) \
+		(!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ *  CPU type and hardware bug flags. Kept separately for each CPU.
+ *  Members of this structure are referenced in head.S, so think twice
+ *  before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+	__u8	x86;		/* CPU family */
+	__u8	x86_vendor;	/* CPU vendor */
+	__u8	x86_model;
+	__u8	x86_mask;
+	char	wp_works_ok;	/* It doesn't on 386's */
+	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
+	char	hard_math;
+	char	rfu;
+       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+	unsigned long	x86_capability[NCAPINTS];
+	char	x86_vendor_id[16];
+	char	x86_model_id[64];
+	int 	x86_cache_size;  /* in KB - valid for CPUs which support this
+				    call  */
+	int 	x86_cache_alignment;	/* In bytes */
+	int	fdiv_bug;
+	int	f00f_bug;
+	int	coma_bug;
+	unsigned long loops_per_jiffy;
+	unsigned char x86_num_cores;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+extern struct tss_struct doublefault_tss;
+DECLARE_PER_CPU(struct tss_struct, init_tss);
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern	int phys_proc_id[NR_CPUS];
+extern char ignore_fpu_irq;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_HT
+extern void detect_ht(struct cpuinfo_x86 *c);
+#else
+static inline void detect_ht(struct cpuinfo_x86 *c) {}
+#endif
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function.
+ * Clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c"(0));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+	       	int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c" (count));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax;
+
+	__asm__("cpuid"
+		: "=a" (eax)
+		: "0" (op)
+		: "bx", "cx", "dx");
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx)
+		: "0" (op)
+		: "cx", "dx" );
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ecx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=c" (ecx)
+		: "0" (op)
+		: "bx", "dx" );
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=d" (edx)
+		: "0" (op)
+		: "bx", "cx");
+	return edx;
+}
+
+#define load_cr3(pgdir) \
+	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
+
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
+#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
+#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
+#define X86_CR4_DE		0x0008	/* enable debugging extensions */
+#define X86_CR4_PSE		0x0010	/* enable page size extensions */
+#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
+#define X86_CR4_MCE		0x0040	/* Machine check enable */
+#define X86_CR4_PGE		0x0080	/* enable global pages */
+#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (i.e.
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPUs that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+	mmu_cr4_features |= mask;
+	__asm__("movl %%cr4,%%eax\n\t"
+		"orl %0,%%eax\n\t"
+		"movl %%eax,%%cr4\n"
+		: : "irg" (mask)
+		:"ax");
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+	mmu_cr4_features &= ~mask;
+	__asm__("movl %%cr4,%%eax\n\t"
+		"andl %0,%%eax\n\t"
+		"movl %%eax,%%cr4\n"
+		: : "irg" (~mask)
+		:"ax");
+}
+
+/*
+ *      NSC/Cyrix CPU configuration register indexes
+ */
+
+#define CX86_PCR0 0x20
+#define CX86_GCR  0xb8
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_PCR1 0xf0
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ *      NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+	outb((reg), 0x22); \
+	outb((data), 0x23); \
+} while (0)
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+		unsigned long edx)
+{
+	/* "monitor %eax,%ecx,%edx;" */
+	asm volatile(
+		".byte 0x0f,0x01,0xc8;"
+		: :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax,%ecx;" */
+	asm volatile(
+		".byte 0x0f,0x01,0xc9;"
+		: :"a" (eax), "c" (ecx));
+}
+
+/* from system description table in BIOS.  Mostly for MCA use, but
+   others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE	(PAGE_OFFSET)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS  65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+struct i387_fsave_struct {
+	long	cwd;
+	long	swd;
+	long	twd;
+	long	fip;
+	long	fcs;
+	long	foo;
+	long	fos;
+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
+	long	status;		/* software status information */
+};
+
+struct i387_fxsave_struct {
+	unsigned short	cwd;
+	unsigned short	swd;
+	unsigned short	twd;
+	unsigned short	fop;
+	long	fip;
+	long	fcs;
+	long	foo;
+	long	fos;
+	long	mxcsr;
+	long	mxcsr_mask;
+	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+	long	padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+	long	cwd;
+	long	swd;
+	long	twd;
+	long	fip;
+	long	fcs;
+	long	foo;
+	long	fos;
+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
+	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
+	struct info	*info;
+	unsigned long	entry_eip;
+};
+
+union i387_union {
+	struct i387_fsave_struct	fsave;
+	struct i387_fxsave_struct	fxsave;
+	struct i387_soft_struct soft;
+};
+
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct;
+
+struct tss_struct {
+	unsigned short	back_link,__blh;
+	unsigned long	esp0;
+	unsigned short	ss0,__ss0h;
+	unsigned long	esp1;
+	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
+	unsigned long	esp2;
+	unsigned short	ss2,__ss2h;
+	unsigned long	__cr3;
+	unsigned long	eip;
+	unsigned long	eflags;
+	unsigned long	eax,ecx,edx,ebx;
+	unsigned long	esp;
+	unsigned long	ebp;
+	unsigned long	esi;
+	unsigned long	edi;
+	unsigned short	es, __esh;
+	unsigned short	cs, __csh;
+	unsigned short	ss, __ssh;
+	unsigned short	ds, __dsh;
+	unsigned short	fs, __fsh;
+	unsigned short	gs, __gsh;
+	unsigned short	ldt, __ldth;
+	unsigned short	trace, io_bitmap_base;
+	/*
+	 * The extra 1 is there because the CPU will access an
+	 * additional byte beyond the end of the IO permission
+	 * bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
+	/*
+	 * Cache the current maximum and the last task that used the bitmap:
+	 */
+	unsigned long io_bitmap_max;
+	struct thread_struct *io_bitmap_owner;
+	/*
+	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 */
+	unsigned long __cacheline_filler[35];
+	/*
+	 * .. and then another 0x100 bytes for emergency kernel stack
+	 */
+	unsigned long stack[64];
+} __attribute__((packed));
+
+#define ARCH_MIN_TASKALIGN	16
+
+struct thread_struct {
+/* cached TLS descriptors. */
+	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+	unsigned long	esp0;
+	unsigned long	sysenter_cs;
+	unsigned long	eip;
+	unsigned long	esp;
+	unsigned long	fs;
+	unsigned long	gs;
+/* Hardware debugging registers */
+	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
+/* fault info */
+	unsigned long	cr2, trap_no, error_code;
+/* floating point info */
+	union i387_union	i387;
+/* virtual 86 mode info */
+	struct vm86_struct __user * vm86_info;
+	unsigned long		screen_bitmap;
+	unsigned long		v86flags, v86mask, saved_esp0;
+	unsigned int		saved_fs, saved_gs;
+/* IO permissions */
+	unsigned long	*io_bitmap_ptr;
+/* max allowed port in the bitmap, in bytes: */
+	unsigned long	io_bitmap_max;
+};
+
+#define INIT_THREAD  {							\
+	.vm86_info = NULL,						\
+	.sysenter_cs = __KERNEL_CS,					\
+	.io_bitmap_ptr = NULL,						\
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS  {							\
+	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
+	.ss0		= __KERNEL_DS,					\
+	.ss1		= __KERNEL_CS,					\
+	.ldt		= GDT_ENTRY_LDT,				\
+	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
+	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
+}
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+#define start_thread(regs, new_eip, new_esp) do {		\
+	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
+	set_fs(USER_DS);					\
+	regs->xds = __USER_DS;					\
+	regs->xes = __USER_DS;					\
+	regs->xss = __USER_DS;					\
+	regs->xcs = __USER_CS;					\
+	regs->eip = new_eip;					\
+	regs->esp = new_esp;					\
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info)                                                 \
+({                                                                     \
+       unsigned long *__ptr = (unsigned long *)(info);                 \
+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
+})
+
+#define task_pt_regs(task)                                             \
+({                                                                     \
+       struct pt_regs *__regs__;                                       \
+       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
+       __regs__ - 1;                                                   \
+})
+
+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
+
+
+struct microcode_header {
+	unsigned int hdrver;
+	unsigned int rev;
+	unsigned int date;
+	unsigned int sig;
+	unsigned int cksum;
+	unsigned int ldrver;
+	unsigned int pf;
+	unsigned int datasize;
+	unsigned int totalsize;
+	unsigned int reserved[3];
+};
+
+struct microcode {
+	struct microcode_header hdr;
+	unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from Prescott processors */
+struct extended_signature {
+	unsigned int sig;
+	unsigned int pf;
+	unsigned int cksum;
+};
+
+struct extended_sigtable {
+	unsigned int count;
+	unsigned int cksum;
+	unsigned int reserved[3];
+	struct extended_signature sigs[0];
+};
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE	_IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+	__asm__ __volatile__("rep;nop": : :"memory");
+}
+
+#define cpu_relax()	rep_nop()
+
+/* generic versions from gas */
+#define GENERIC_NOP1	".byte 0x90\n"
+#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
+#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
+#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
+#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
+#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
+
+/* Opteron nops */
+#define K8_NOP1 GENERIC_NOP1
+#define K8_NOP2	".byte 0x66,0x90\n" 
+#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
+#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
+#define K8_NOP5	K8_NOP3 K8_NOP2 
+#define K8_NOP6	K8_NOP3 K8_NOP3
+#define K8_NOP7	K8_NOP4 K8_NOP3
+#define K8_NOP8	K8_NOP4 K8_NOP4
+
+/* K7 nops */
+/* uses eax dependencies (arbitrary choice) */
+#define K7_NOP1  GENERIC_NOP1
+#define K7_NOP2	".byte 0x8b,0xc0\n" 
+#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
+#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
+#define K7_NOP5	K7_NOP4 ASM_NOP1
+#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+#define K7_NOP8        K7_NOP7 ASM_NOP1
+
+#ifdef CONFIG_MK8
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+#elif defined(CONFIG_MK7)
+#define ASM_NOP1 K7_NOP1
+#define ASM_NOP2 K7_NOP2
+#define ASM_NOP3 K7_NOP3
+#define ASM_NOP4 K7_NOP4
+#define ASM_NOP5 K7_NOP5
+#define ASM_NOP6 K7_NOP6
+#define ASM_NOP7 K7_NOP7
+#define ASM_NOP8 K7_NOP8
+#else
+#define ASM_NOP1 GENERIC_NOP1
+#define ASM_NOP2 GENERIC_NOP2
+#define ASM_NOP3 GENERIC_NOP3
+#define ASM_NOP4 GENERIC_NOP4
+#define ASM_NOP5 GENERIC_NOP5
+#define ASM_NOP6 GENERIC_NOP6
+#define ASM_NOP7 GENERIC_NOP7
+#define ASM_NOP8 GENERIC_NOP8
+#endif
+
+#define ASM_NOP_MAX 8
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However, we don't currently do prefetches for pre-XP Athlons;
+   that should be fixed. */
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+	alternative_input(ASM_NOP4,
+			  "prefetchnta (%1)",
+			  X86_FEATURE_XMM,
+			  "r" (x));
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for 
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+extern inline void prefetchw(const void *x)
+{
+	alternative_input(ASM_NOP4,
+			  "prefetchw (%1)",
+			  X86_FEATURE_3DNOW,
+			  "r" (x));
+}
+#define spin_lock_prefetch(x)	prefetchw(x)
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern unsigned long boot_option_idle_override;
+
+#endif /* __ASM_I386_PROCESSOR_H */
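
As a usage sketch (user space, not patch content): the cpuid() helper
above is typically used with leaf 0, which returns the maximum supported
leaf in %eax and the vendor string in %ebx:%edx:%ecx. Using the same asm
pattern as the header:

	#include <stdio.h>
	#include <string.h>

	static void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			  unsigned int *ecx, unsigned int *edx)
	{
		__asm__("cpuid"
			: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
			: "0" (op), "c" (0));
	}

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		char vendor[13];

		cpuid(0, &eax, &ebx, &ecx, &edx);
		memcpy(vendor + 0, &ebx, 4);	/* e.g. "Genu" on Intel */
		memcpy(vendor + 4, &edx, 4);	/* "ineI" */
		memcpy(vendor + 8, &ecx, 4);	/* "ntel" */
		vendor[12] = '\0';
		printf("max leaf %u, vendor \"%s\"\n", eax, vendor);
		return 0;
	}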
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
new file mode 100644
index 0000000..8618914
--- /dev/null
+++ b/include/asm-i386/ptrace.h
@@ -0,0 +1,69 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS  13
+#define EFL 14
+#define UESP 15
+#define SS   16
+#define FRAME_SIZE 17
+
+/* this struct defines the way the registers are stored on the 
+   stack during a system call. */
+
+struct pt_regs {
+	long ebx;
+	long ecx;
+	long edx;
+	long esi;
+	long edi;
+	long ebp;
+	long eax;
+	int  xds;
+	int  xes;
+	long orig_eax;
+	long eip;
+	int  xcs;
+	long eflags;
+	long esp;
+	int  xss;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+#define PTRACE_GETFPREGS          14
+#define PTRACE_SETFPREGS          15
+#define PTRACE_GETFPXREGS         18
+#define PTRACE_SETFPXREGS         19
+
+#define PTRACE_OLDSETOPTIONS         21
+
+#define PTRACE_GET_THREAD_AREA    25
+#define PTRACE_SET_THREAD_AREA    26
+
+#ifdef __KERNEL__
+struct task_struct;
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs))
+#define instruction_pointer(regs) ((regs)->eip)
+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+#endif
+
+#endif
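
An aside (illustration only): PTRACE_GETREGS fills a caller-supplied
buffer with the register layout above; on i386, struct user_regs_struct
from <sys/user.h> mirrors struct pt_regs. A minimal tracer sketch:

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/user.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t child = fork();

		if (child == 0) {
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			execl("/bin/true", "true", (char *)NULL);
			_exit(1);
		}
		wait(NULL);			/* child stops on exec */

		struct user_regs_struct regs;
		ptrace(PTRACE_GETREGS, child, NULL, &regs);
		printf("fetched traced child's registers\n");
		ptrace(PTRACE_CONT, child, NULL, NULL);
		return 0;
	}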
diff --git a/include/asm-i386/resource.h b/include/asm-i386/resource.h
new file mode 100644
index 0000000..6c1ea37
--- /dev/null
+++ b/include/asm-i386/resource.h
@@ -0,0 +1,6 @@
+#ifndef _I386_RESOURCE_H
+#define _I386_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif
diff --git a/include/asm-i386/rtc.h b/include/asm-i386/rtc.h
new file mode 100644
index 0000000..ffd0210
--- /dev/null
+++ b/include/asm-i386/rtc.h
@@ -0,0 +1,10 @@
+#ifndef _I386_RTC_H
+#define _I386_RTC_H
+
+/*
+ * x86 uses the default access methods for the RTC.
+ */
+
+#include <asm-generic/rtc.h>
+
+#endif
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
new file mode 100644
index 0000000..b57cc7a
--- /dev/null
+++ b/include/asm-i386/rwlock.h
@@ -0,0 +1,71 @@
+/* include/asm-i386/rwlock.h
+ *
+ *	Helpers used by both rw spinlocks and rw semaphores.
+ *
+ *	Based in part on code from semaphore.h and
+ *	spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ *	Copyright 1999 Red Hat, Inc.
+ *
+ *	Written by Benjamin LaHaise.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_I386_RWLOCK_H
+#define _ASM_I386_RWLOCK_H
+
+#define RW_LOCK_BIAS		 0x01000000
+#define RW_LOCK_BIAS_STR	"0x01000000"
+
+#define __build_read_lock_ptr(rw, helper)   \
+	asm volatile(LOCK "subl $1,(%0)\n\t" \
+		     "jns 1f\n" \
+		     "call " helper "\n\t" \
+		     "1:\n" \
+		     ::"a" (rw) : "memory")
+
+#define __build_read_lock_const(rw, helper)   \
+	asm volatile(LOCK "subl $1,%0\n\t" \
+		     "jns 1f\n" \
+		     "pushl %%eax\n\t" \
+		     "leal %0,%%eax\n\t" \
+		     "call " helper "\n\t" \
+		     "popl %%eax\n\t" \
+		     "1:\n" \
+		     :"=m" (*(volatile int *)rw) : : "memory")
+
+#define __build_read_lock(rw, helper)	do { \
+						if (__builtin_constant_p(rw)) \
+							__build_read_lock_const(rw, helper); \
+						else \
+							__build_read_lock_ptr(rw, helper); \
+					} while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+		     "jz 1f\n" \
+		     "call " helper "\n\t" \
+		     "1:\n" \
+		     ::"a" (rw) : "memory")
+
+#define __build_write_lock_const(rw, helper) \
+	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+		     "jz 1f\n" \
+		     "pushl %%eax\n\t" \
+		     "leal %0,%%eax\n\t" \
+		     "call " helper "\n\t" \
+		     "popl %%eax\n\t" \
+		     "1:\n" \
+		     :"=m" (*(volatile int *)rw) : : "memory")
+
+#define __build_write_lock(rw, helper)	do { \
+						if (__builtin_constant_p(rw)) \
+							__build_write_lock_const(rw, helper); \
+						else \
+							__build_write_lock_ptr(rw, helper); \
+					} while (0)
+
+#endif
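
An aside on the bias scheme (plain C, no atomicity, purely
illustrative): the jns/jz tests above work because readers subtract 1
and writers subtract the whole bias, so the sign/zero flags encode
contention:

	#include <stdio.h>

	#define RW_LOCK_BIAS 0x01000000

	int main(void)
	{
		int count = RW_LOCK_BIAS;	/* unlocked */

		count -= 1;			/* read_lock: result > 0, granted */
		printf("after read_lock:  %#x\n", count);
		count += 1;			/* read_unlock */

		count -= RW_LOCK_BIAS;		/* write_lock: result == 0, granted */
		printf("after write_lock: %#x\n", count);
		return 0;
	}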
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
new file mode 100644
index 0000000..7625a67
--- /dev/null
+++ b/include/asm-i386/rwsem.h
@@ -0,0 +1,288 @@
+/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * Derived from asm-i386/semaphore.h
+ *
+ *
+ * The MSW of the count is the negated number of active writers and waiting
+ * lockers, and the LSW is the total number of active locks
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
+ * uncontended lock. This can be determined because XADD returns the old value.
+ * Readers increment by 1 and see a positive value when uncontended, negative
+ * if there are writers (and maybe readers) waiting (in which case it goes to
+ * sleep).
+ *
+ * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
+ * be extended to 65534 by manually checking the whole MSW rather than relying
+ * on the S flag.
+ *
+ * The value of ACTIVE_BIAS supports up to 65535 active processes.
+ *
+ * This should be totally fair - if anything is waiting, a process that wants a
+ * lock will go to the back of the queue. When the currently active lock is
+ * released, if there's a writer at the front of the queue, then that and only
+ * that will be woken up; if there's a bunch of consecutive readers at the
+ * front, then they'll all be woken up, but no other readers will be.
+ */
+
+#ifndef _I386_RWSEM_H
+#define _I386_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	signed long		count;
+#define RWSEM_UNLOCKED_VALUE		0x00000000
+#define RWSEM_ACTIVE_BIAS		0x00000001
+#define RWSEM_ACTIVE_MASK		0x0000ffff
+#define RWSEM_WAITING_BIAS		(-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#if RWSEM_DEBUG
+	int			debug;
+#endif
+};
+
+/*
+ * initialisation
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT      , 0
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+	__RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# beginning down_read\n\t"
+LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
+		"  js        2f\n\t" /* jump if we weren't granted the lock */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  pushl     %%ecx\n\t"
+		"  pushl     %%edx\n\t"
+		"  call      rwsem_down_read_failed\n\t"
+		"  popl      %%edx\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending down_read\n\t"
+		: "=m"(sem->count)
+		: "a"(sem), "m"(sem->count)
+		: "memory", "cc");
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	__s32 result, tmp;
+	__asm__ __volatile__(
+		"# beginning __down_read_trylock\n\t"
+		"  movl      %0,%1\n\t"
+		"1:\n\t"
+		"  movl	     %1,%2\n\t"
+		"  addl      %3,%2\n\t"
+		"  jle	     2f\n\t"
+LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
+		"  jnz	     1b\n\t"
+		"2:\n\t"
+		"# ending __down_read_trylock\n\t"
+		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
+		: "i"(RWSEM_ACTIVE_READ_BIAS)
+		: "memory", "cc");
+	return result>=0 ? 1 : 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	int tmp;
+
+	tmp = RWSEM_ACTIVE_WRITE_BIAS;
+	__asm__ __volatile__(
+		"# beginning down_write\n\t"
+LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
+		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
+		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  pushl     %%ecx\n\t"
+		"  call      rwsem_down_write_failed\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending down_write"
+		: "=m"(sem->count), "=d"(tmp)
+		: "a"(sem), "1"(tmp), "m"(sem->count)
+		: "memory", "cc");
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	signed long ret = cmpxchg(&sem->count,
+				  RWSEM_UNLOCKED_VALUE, 
+				  RWSEM_ACTIVE_WRITE_BIAS);
+	if (ret == RWSEM_UNLOCKED_VALUE)
+		return 1;
+	return 0;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	__asm__ __volatile__(
+		"# beginning __up_read\n\t"
+LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
+		"  js        2f\n\t" /* jump if the lock is being waited upon */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  decw      %%dx\n\t" /* do nothing if still outstanding active readers */
+		"  jnz       1b\n\t"
+		"  pushl     %%ecx\n\t"
+		"  call      rwsem_wake\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending __up_read\n"
+		: "=m"(sem->count), "=d"(tmp)
+		: "a"(sem), "1"(tmp), "m"(sem->count)
+		: "memory", "cc");
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# beginning __up_write\n\t"
+		"  movl      %2,%%edx\n\t"
+LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
+		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  decw      %%dx\n\t" /* did the active count reduce to 0? */
+		"  jnz       1b\n\t" /* jump back if not */
+		"  pushl     %%ecx\n\t"
+		"  call      rwsem_wake\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending __up_write\n"
+		: "=m"(sem->count)
+		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
+		: "memory", "cc", "edx");
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+		"# beginning __downgrade_write\n\t"
+LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		"  js        2f\n\t" /* jump if the lock is being waited upon */
+		"1:\n\t"
+		LOCK_SECTION_START("")
+		"2:\n\t"
+		"  pushl     %%ecx\n\t"
+		"  pushl     %%edx\n\t"
+		"  call      rwsem_downgrade_wake\n\t"
+		"  popl      %%edx\n\t"
+		"  popl      %%ecx\n\t"
+		"  jmp       1b\n"
+		LOCK_SECTION_END
+		"# ending __downgrade_write\n"
+		: "=m"(sem->count)
+		: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+		: "memory", "cc");
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+	__asm__ __volatile__(
+LOCK_PREFIX	"addl %1,%0"
+		: "=m"(sem->count)
+		: "ir"(delta), "m"(sem->count));
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+	int tmp = delta;
+
+	__asm__ __volatile__(
+LOCK_PREFIX	"xadd %0,(%2)"
+		: "+r"(tmp), "=m"(sem->count)
+		: "r"(sem), "m"(sem->count)
+		: "memory");
+
+	return tmp+delta;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _I386_RWSEM_H */
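
As an aside (plain C, no atomicity, purely illustrative): the bias
arithmetic described in the comment at the top of this file can be
checked directly; note the 0xffff0001 value for an uncontended writer:

	#include <stdio.h>

	#define RWSEM_ACTIVE_BIAS	0x00000001
	#define RWSEM_WAITING_BIAS	(-0x00010000)
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		int count = 0;			/* RWSEM_UNLOCKED_VALUE */

		count += RWSEM_ACTIVE_BIAS;	/* down_read, uncontended */
		printf("one reader: %#010x\n", (unsigned)count); /* 0x00000001 */
		count -= RWSEM_ACTIVE_BIAS;	/* up_read */

		count += RWSEM_ACTIVE_WRITE_BIAS; /* down_write, uncontended */
		printf("one writer: %#010x\n", (unsigned)count); /* 0xffff0001 */
		return 0;
	}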
diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h
new file mode 100644
index 0000000..55d6c95
--- /dev/null
+++ b/include/asm-i386/scatterlist.h
@@ -0,0 +1,21 @@
+#ifndef _I386_SCATTERLIST_H
+#define _I386_SCATTERLIST_H
+
+struct scatterlist {
+    struct page		*page;
+    unsigned int	offset;
+    dma_addr_t		dma_address;
+    unsigned int	length;
+};
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns.
+ */
+#define sg_dma_address(sg)	((sg)->dma_address)
+#define sg_dma_len(sg)		((sg)->length)
+
+#define ISA_DMA_THRESHOLD (0x00ffffff)
+
+#endif /* !(_I386_SCATTERLIST_H) */
diff --git a/include/asm-i386/seccomp.h b/include/asm-i386/seccomp.h
new file mode 100644
index 0000000..18da19e
--- /dev/null
+++ b/include/asm-i386/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_SECCOMP_H
+
+#include <linux/thread_info.h>
+
+#ifdef TIF_32BIT
+#error "unexpected TIF_32BIT on i386"
+#endif
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_sigreturn
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-i386/sections.h b/include/asm-i386/sections.h
new file mode 100644
index 0000000..2dcbb92
--- /dev/null
+++ b/include/asm-i386/sections.h
@@ -0,0 +1,7 @@
+#ifndef _I386_SECTIONS_H
+#define _I386_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#endif
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
new file mode 100644
index 0000000..bb5ff5b
--- /dev/null
+++ b/include/asm-i386/segment.h
@@ -0,0 +1,101 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ *   0 - null
+ *   1 - reserved
+ *   2 - reserved
+ *   3 - reserved
+ *
+ *   4 - unused			<==== new cacheline
+ *   5 - unused
+ *
+ *  ------- start of TLS (Thread-Local Storage) segments:
+ *
+ *   6 - TLS segment #1			[ glibc's TLS segment ]
+ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
+ *   8 - TLS segment #3
+ *   9 - reserved
+ *  10 - reserved
+ *  11 - reserved
+ *
+ *  ------- start of kernel segments:
+ *
+ *  12 - kernel code segment		<==== new cacheline
+ *  13 - kernel data segment
+ *  14 - default user CS
+ *  15 - default user DS
+ *  16 - TSS
+ *  17 - LDT
+ *  18 - PNPBIOS support (16->32 gate)
+ *  19 - PNPBIOS support
+ *  20 - PNPBIOS support
+ *  21 - PNPBIOS support
+ *  22 - PNPBIOS support
+ *  23 - APM BIOS support
+ *  24 - APM BIOS support
+ *  25 - APM BIOS support 
+ *
+ *  26 - ESPFIX small SS
+ *  27 - unused
+ *  28 - unused
+ *  29 - unused
+ *  30 - unused
+ *  31 - TSS for double fault handler
+ */
+#define GDT_ENTRY_TLS_ENTRIES	3
+#define GDT_ENTRY_TLS_MIN	6
+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#define GDT_ENTRY_DEFAULT_USER_CS	14
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
+
+#define GDT_ENTRY_DEFAULT_USER_DS	15
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
+
+#define GDT_ENTRY_KERNEL_BASE	12
+
+#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
+
+#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
+
+#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
+#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
+
+#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
+#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
+
+#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS	31
+
+/*
+ * The GDT has 32 entries
+ */
+#define GDT_ENTRIES 32
+
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+/* Simple and small GDT entries for booting only */
+
+#define GDT_ENTRY_BOOT_CS		2
+#define __BOOT_CS	(GDT_ENTRY_BOOT_CS * 8)
+
+#define GDT_ENTRY_BOOT_DS		(GDT_ENTRY_BOOT_CS + 1)
+#define __BOOT_DS	(GDT_ENTRY_BOOT_DS * 8)
+
+/*
+ * The interrupt descriptor table has room for 256 idt's,
+ * the global descriptor table is dependent on the number
+ * of tasks we can have..
+ */
+#define IDT_ENTRIES 256
+
+#endif
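
For reference (illustration only): a segment selector is
(GDT index * 8) | RPL, so the kernel segments carry RPL 0 and the user
segments RPL 3. A quick check of the values defined above:

	#include <stdio.h>

	int main(void)
	{
		printf("__KERNEL_CS = %#x\n", 12 * 8);		/* 0x60 */
		printf("__KERNEL_DS = %#x\n", 13 * 8);		/* 0x68 */
		printf("__USER_CS   = %#x\n", 14 * 8 + 3);	/* 0x73 */
		printf("__USER_DS   = %#x\n", 15 * 8 + 3);	/* 0x7b */
		return 0;
	}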
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
new file mode 100644
index 0000000..ea563da
--- /dev/null
+++ b/include/asm-i386/semaphore.h
@@ -0,0 +1,194 @@
+#ifndef _I386_SEMAPHORE_H
+#define _I386_SEMAPHORE_H
+
+#include <linux/linkage.h>
+
+#ifdef __KERNEL__
+
+/*
+ * SMP- and interrupt-safe semaphores..
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
+ *                     the original code and to make semaphore waits
+ *                     interruptible so that processes waiting on
+ *                     semaphores can be killed.
+ * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
+ *		       functions in asm/semaphore-helper.h while fixing a
+ *		       potential and subtle race discovered by Ulrich Schmid
+ *		       in down_interruptible(). Since I started to play here I
+ *		       also implemented the `trylock' semaphore operation.
+ *          1999-07-02 Artur Skawina <skawina@geocities.com>
+ *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
+ *                     do this). Changed calling sequences from push/jmp to
+ *                     traditional call/ret.
+ * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
+ *		       Some hacks to ensure compatibility with recent
+ *		       GCC snapshots, to avoid stack corruption when compiling
+ *		       with -fomit-frame-pointer. It's unclear whether this will
+ *		       be fixed in GCC, as our previous implementation was a
+ *		       bit dubious.
+ *
+ * If you would like to see an analysis of this implementation, please
+ * ftp to gcom.com and download the file
+ * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
+ *
+ */
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+	atomic_t count;
+	int sleepers;
+	wait_queue_head_t wait;
+};
+
+
+#define __SEMAPHORE_INITIALIZER(name, n)				\
+{									\
+	.count		= ATOMIC_INIT(n),				\
+	.sleepers	= 0,						\
+	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
+}
+
+#define __MUTEX_INITIALIZER(name) \
+	__SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * i'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+	atomic_set(&sem->count, val);
+	sem->sleepers = 0;
+	init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+	sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+	sema_init(sem, 0);
+}
+
+fastcall void __down_failed(void /* special register calling convention */);
+fastcall int  __down_failed_interruptible(void  /* params in registers */);
+fastcall int  __down_failed_trylock(void  /* params in registers */);
+fastcall void __up_wakeup(void /* special register calling convention */);
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "__down_failed" is a special asm handler that calls the C
+ * routine that actually waits. See arch/i386/kernel/semaphore.c
+ */
+static inline void down(struct semaphore * sem)
+{
+	might_sleep();
+	__asm__ __volatile__(
+		"# atomic down operation\n\t"
+		LOCK "decl %0\n\t"     /* --sem->count */
+		"js 2f\n"
+		"1:\n"
+		LOCK_SECTION_START("")
+		"2:\tlea %0,%%eax\n\t"
+		"call __down_failed\n\t"
+		"jmp 1b\n"
+		LOCK_SECTION_END
+		:"=m" (sem->count)
+		:
+		:"memory","ax");
+}
+
+/*
+ * Interruptible attempt to acquire a semaphore.  If we obtain
+ * it, return zero.  If we were interrupted, return -EINTR.
+ */
+static inline int down_interruptible(struct semaphore * sem)
+{
+	int result;
+
+	might_sleep();
+	__asm__ __volatile__(
+		"# atomic interruptible down operation\n\t"
+		LOCK "decl %1\n\t"     /* --sem->count */
+		"js 2f\n\t"
+		"xorl %0,%0\n"
+		"1:\n"
+		LOCK_SECTION_START("")
+		"2:\tlea %1,%%eax\n\t"
+		"call __down_failed_interruptible\n\t"
+		"jmp 1b\n"
+		LOCK_SECTION_END
+		:"=a" (result), "=m" (sem->count)
+		:
+		:"memory");
+	return result;
+}
+
+/*
+ * Attempt to down() a semaphore without blocking.
+ * Returns zero if we acquired it.
+ */
+static inline int down_trylock(struct semaphore * sem)
+{
+	int result;
+
+	__asm__ __volatile__(
+		"# atomic interruptible down operation\n\t"
+		LOCK "decl %1\n\t"     /* --sem->count */
+		"js 2f\n\t"
+		"xorl %0,%0\n"
+		"1:\n"
+		LOCK_SECTION_START("")
+		"2:\tlea %1,%%eax\n\t"
+		"call __down_failed_trylock\n\t"
+		"jmp 1b\n"
+		LOCK_SECTION_END
+		:"=a" (result), "=m" (sem->count)
+		:
+		:"memory");
+	return result;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+static inline void up(struct semaphore * sem)
+{
+	__asm__ __volatile__(
+		"# atomic up operation\n\t"
+		LOCK "incl %0\n\t"     /* ++sem->count */
+		"jle 2f\n"
+		"1:\n"
+		LOCK_SECTION_START("")
+		"2:\tlea %0,%%eax\n\t"
+		"call __up_wakeup\n\t"
+		"jmp 1b\n"
+		LOCK_SECTION_END
+		".subsection 0\n"
+		:"=m" (sem->count)
+		:
+		:"memory","ax");
+}
+
+#endif
+#endif
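
An aside: down()/up() follow classic P/V semantics. The nearest
user-space analogue (illustration only, not kernel code) is POSIX
semaphores, with sem_wait ~ down and sem_post ~ up:

	#include <semaphore.h>
	#include <stdio.h>

	int main(void)
	{
		sem_t sem;

		sem_init(&sem, 0, 1);	/* like DECLARE_MUTEX: count starts at 1 */
		sem_wait(&sem);		/* down(): sleeps if count would go negative */
		/* ... critical section ... */
		sem_post(&sem);		/* up(): wakes one waiter, if any */
		sem_destroy(&sem);
		printf("done\n");
		return 0;
	}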
diff --git a/include/asm-i386/sembuf.h b/include/asm-i386/sembuf.h
new file mode 100644
index 0000000..3238351
--- /dev/null
+++ b/include/asm-i386/sembuf.h
@@ -0,0 +1,25 @@
+#ifndef _I386_SEMBUF_H
+#define _I386_SEMBUF_H
+
+/* 
+ * The semid64_ds structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+	__kernel_time_t	sem_otime;		/* last semop time */
+	unsigned long	__unused1;
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long	__unused2;
+	unsigned long	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _I386_SEMBUF_H */
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
new file mode 100644
index 0000000..21ddecc
--- /dev/null
+++ b/include/asm-i386/serial.h
@@ -0,0 +1,130 @@
+/*
+ * include/asm-i386/serial.h
+ */
+
+#include <linux/config.h>
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#else
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#endif
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define ACCENT_FLAGS 0
+#define BOCA_FLAGS 0
+#define HUB6_FLAGS 0
+#endif
+
+#define MCA_COM_FLAGS	(STD_COM_FLAGS|ASYNC_BOOT_ONLYMCA)
+
+/*
+ * The following define the access methods for the HUB6 card. All
+ * access is through two ports for all 24 possible chips. The card is
+ * selected through the high 2 bits, the port on that card with the
+ * "middle" 3 bits, and the register on that port with the bottom
+ * 3 bits.
+ *
+ * While the access port and interrupt is configurable, the default
+ * port locations are 0x302 for the port control register, and 0x303
+ * for the data read/write register. Normally, the interrupt is at irq3
+ * but can be anything from 3 to 7 inclusive. Note that using 3 will
+ * require disabling com2.
+ */
+
+#define C_P(card,port) (((card)<<6|(port)<<3) + 1)
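+/*
+ * Worked example of the encoding above: C_P(1,2) selects card 1,
+ * port 2:
+ *
+ *	(1 << 6 | 2 << 3) + 1  ==  (0x40 | 0x10) + 1  ==  0x51
+ */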
+
+#define STD_SERIAL_PORT_DEFNS			\
+	/* UART CLK   PORT IRQ     FLAGS        */			\
+	{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS },	/* ttyS0 */	\
+	{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS },	/* ttyS1 */	\
+	{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS },	/* ttyS2 */	\
+	{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS },	/* ttyS3 */
+
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define EXTRA_SERIAL_PORT_DEFNS			\
+	{ 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, 	/* ttyS4 */	\
+	{ 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS },	/* ttyS5 */	\
+	{ 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS },	/* ttyS6 */	\
+	{ 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS },	/* ttyS7 */	\
+	{ 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS },	/* ttyS8 */	\
+	{ 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS },	/* ttyS9 */	\
+	{ 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS },	/* ttyS10 */	\
+	{ 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS },	/* ttyS11 */	\
+	{ 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS },	/* ttyS12 */	\
+	{ 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS },	/* ttyS13 */	\
+	{ 0, BASE_BAUD, 0x000, 0, 0 },	/* ttyS14 (spare) */		\
+	{ 0, BASE_BAUD, 0x000, 0, 0 },	/* ttyS15 (spare) */		\
+	{ 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS },	/* ttyS16 */	\
+	{ 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS },	/* ttyS17 */	\
+	{ 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS },	/* ttyS18 */	\
+	{ 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS },	/* ttyS19 */	\
+	{ 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS },	/* ttyS20 */	\
+	{ 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS },	/* ttyS21 */	\
+	{ 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS },	/* ttyS22 */	\
+	{ 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS },	/* ttyS23 */	\
+	{ 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS },	/* ttyS24 */	\
+	{ 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS },	/* ttyS25 */	\
+	{ 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS },	/* ttyS26 */	\
+	{ 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS },	/* ttyS27 */	\
+	{ 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS },	/* ttyS28 */	\
+	{ 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS },	/* ttyS29 */	\
+	{ 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS },	/* ttyS30 */	\
+	{ 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS },	/* ttyS31 */
+#else
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif
+
+/* You can have up to four HUB6s in the system, but I've only
+ * included two cards here for a total of twelve ports.
+ */
+#if (defined(CONFIG_HUB6) && defined(CONFIG_SERIAL_MANY_PORTS))
+#define HUB6_SERIAL_PORT_DFNS		\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,0) },  /* ttyS32 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,1) },  /* ttyS33 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,2) },  /* ttyS34 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,3) },  /* ttyS35 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,4) },  /* ttyS36 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,5) },  /* ttyS37 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,0) },  /* ttyS38 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,1) },  /* ttyS39 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,2) },  /* ttyS40 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,3) },  /* ttyS41 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,4) },  /* ttyS42 */	\
+	{ 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,5) },  /* ttyS43 */
+#else
+#define HUB6_SERIAL_PORT_DFNS
+#endif
+
+#ifdef CONFIG_MCA
+#define MCA_SERIAL_PORT_DFNS			\
+	{ 0, BASE_BAUD, 0x3220, 3, MCA_COM_FLAGS },	\
+	{ 0, BASE_BAUD, 0x3228, 3, MCA_COM_FLAGS },	\
+	{ 0, BASE_BAUD, 0x4220, 3, MCA_COM_FLAGS },	\
+	{ 0, BASE_BAUD, 0x4228, 3, MCA_COM_FLAGS },	\
+	{ 0, BASE_BAUD, 0x5220, 3, MCA_COM_FLAGS },	\
+	{ 0, BASE_BAUD, 0x5228, 3, MCA_COM_FLAGS },
+#else
+#define MCA_SERIAL_PORT_DFNS
+#endif
+
+#define SERIAL_PORT_DFNS		\
+	STD_SERIAL_PORT_DEFNS		\
+	EXTRA_SERIAL_PORT_DEFNS		\
+	HUB6_SERIAL_PORT_DFNS		\
+	MCA_SERIAL_PORT_DFNS
+
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
new file mode 100644
index 0000000..8814b54
--- /dev/null
+++ b/include/asm-i386/setup.h
@@ -0,0 +1,66 @@
+/*
+ *	Just a placeholder. We don't want to have to test for x86 before
+ *	we include things.
+ */
+
+#ifndef _i386_SETUP_H
+#define _i386_SETUP_H
+
+#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
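+/*
+ * With 4KB pages (PAGE_SHIFT == 12), for example:
+ *
+ *	PFN_UP(0x1001)   == 2		// round up to the next frame
+ *	PFN_DOWN(0x1fff) == 1		// frame containing the address
+ *	PFN_PHYS(3)      == 0x3000	// back to a physical address
+ */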
+
+/*
+ * Reserved space for vmalloc and iomap - defined in asm/page.h
+ */
+#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
+#define MAX_NONPAE_PFN	(1 << 20)
+
+#define PARAM_SIZE 2048
+#define COMMAND_LINE_SIZE 256
+
+#define OLD_CL_MAGIC_ADDR	0x90020
+#define OLD_CL_MAGIC		0xA33F
+#define OLD_CL_BASE_ADDR	0x90000
+#define OLD_CL_OFFSET		0x90022
+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
+
+#ifndef __ASSEMBLY__
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+extern unsigned char boot_params[PARAM_SIZE];
+
+#define PARAM	(boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
+#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
+#define INITRD_START (*(unsigned long *) (PARAM+0x218))
+#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x140))
+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
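+/*
+ * A sketch of how early boot code might consume these accessors (the
+ * surrounding logic is illustrative only):
+ *
+ *	if (LOADER_TYPE && INITRD_START) {
+ *		initrd_start = INITRD_START + PAGE_OFFSET;
+ *		initrd_end   = initrd_start + INITRD_SIZE;
+ *	}
+ */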
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _i386_SETUP_H */
diff --git a/include/asm-i386/shmbuf.h b/include/asm-i386/shmbuf.h
new file mode 100644
index 0000000..d1cdc3c
--- /dev/null
+++ b/include/asm-i386/shmbuf.h
@@ -0,0 +1,42 @@
+#ifndef _I386_SHMBUF_H
+#define _I386_SHMBUF_H
+
+/* 
+ * The shmid64_ds structure for i386 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	unsigned long		__unused1;
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	unsigned long		__unused2;
+	__kernel_time_t		shm_ctime;	/* last change time */
+	unsigned long		__unused3;
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused4;
+	unsigned long		__unused5;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _I386_SHMBUF_H */
diff --git a/include/asm-i386/shmparam.h b/include/asm-i386/shmparam.h
new file mode 100644
index 0000000..786243a
--- /dev/null
+++ b/include/asm-i386/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef _ASMI386_SHMPARAM_H
+#define _ASMI386_SHMPARAM_H
+
+#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */
+
+#endif /* _ASMI386_SHMPARAM_H */
diff --git a/include/asm-i386/sigcontext.h b/include/asm-i386/sigcontext.h
new file mode 100644
index 0000000..aaef089
--- /dev/null
+++ b/include/asm-i386/sigcontext.h
@@ -0,0 +1,85 @@
+#ifndef _ASMi386_SIGCONTEXT_H
+#define _ASMi386_SIGCONTEXT_H
+
+#include <linux/compiler.h>
+
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the normal i387
+ * hardware setup, the extra "status" word is used to save the
+ * coprocessor status word before entering the handler.
+ *
+ * Pentium III FXSR, SSE support
+ *	Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * The FPU state data structure has had to grow to accommodate the
+ * extended FPU state required by the Streaming SIMD Extensions.
+ * There is no documented standard to accomplish this at the moment.
+ */
+struct _fpreg {
+	unsigned short significand[4];
+	unsigned short exponent;
+};
+
+struct _fpxreg {
+	unsigned short significand[4];
+	unsigned short exponent;
+	unsigned short padding[3];
+};
+
+struct _xmmreg {
+	unsigned long element[4];
+};
+
+struct _fpstate {
+	/* Regular FPU environment */
+	unsigned long 	cw;
+	unsigned long	sw;
+	unsigned long	tag;
+	unsigned long	ipoff;
+	unsigned long	cssel;
+	unsigned long	dataoff;
+	unsigned long	datasel;
+	struct _fpreg	_st[8];
+	unsigned short	status;
+	unsigned short	magic;		/* 0xffff = regular FPU data only */
+
+	/* FXSR FPU environment */
+	unsigned long	_fxsr_env[6];	/* FXSR FPU env is ignored */
+	unsigned long	mxcsr;
+	unsigned long	reserved;
+	struct _fpxreg	_fxsr_st[8];	/* FXSR FPU reg data is ignored */
+	struct _xmmreg	_xmm[8];
+	unsigned long	padding[56];
+};
+
+#define X86_FXSR_MAGIC		0x0000
+
+struct sigcontext {
+	unsigned short gs, __gsh;
+	unsigned short fs, __fsh;
+	unsigned short es, __esh;
+	unsigned short ds, __dsh;
+	unsigned long edi;
+	unsigned long esi;
+	unsigned long ebp;
+	unsigned long esp;
+	unsigned long ebx;
+	unsigned long edx;
+	unsigned long ecx;
+	unsigned long eax;
+	unsigned long trapno;
+	unsigned long err;
+	unsigned long eip;
+	unsigned short cs, __csh;
+	unsigned long eflags;
+	unsigned long esp_at_signal;
+	unsigned short ss, __ssh;
+	struct _fpstate __user * fpstate;
+	unsigned long oldmask;
+	unsigned long cr2;
+};
+
+
+#endif
diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h
new file mode 100644
index 0000000..fe18f98
--- /dev/null
+++ b/include/asm-i386/siginfo.h
@@ -0,0 +1,6 @@
+#ifndef _I386_SIGINFO_H
+#define _I386_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
new file mode 100644
index 0000000..7ef343b
--- /dev/null
+++ b/include/asm-i386/signal.h
@@ -0,0 +1,237 @@
+#ifndef _ASMi386_SIGNAL_H
+#define _ASMi386_SIGNAL_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/time.h>
+#include <linux/compiler.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+#ifdef __KERNEL__
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+#define _NSIG		64
+#define _NSIG_BPW	32
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+#define NSIG		32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGBUS		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGUSR1		10
+#define SIGSEGV		11
+#define SIGUSR2		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGSTKFLT	16
+#define SIGCHLD		17
+#define SIGCONT		18
+#define SIGSTOP		19
+#define SIGTSTP		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGURG		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGIO		29
+#define SIGPOLL		SIGIO
+/*
+#define SIGLOST		29
+*/
+#define SIGPWR		30
+#define SIGSYS		31
+#define	SIGUNUSED	31
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left for historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago).
+ * SA_NOCLDSTOP turns off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP	0x00000001u
+#define SA_NOCLDWAIT	0x00000002u
+#define SA_SIGINFO	0x00000004u
+#define SA_ONSTACK	0x08000000u
+#define SA_RESTART	0x10000000u
+#define SA_NODEFER	0x40000000u
+#define SA_RESETHAND	0x80000000u
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER	0x04000000
+
+/* 
+ * sigaltstack controls
+ */
+#define SS_ONSTACK	1
+#define SS_DISABLE	2
+
+#define MINSIGSTKSZ	2048
+#define SIGSTKSZ	8192
+
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE		SA_ONESHOT
+#define SA_SAMPLE_RANDOM	SA_RESTART
+#define SA_SHIRQ		0x04000000
+#endif
+
+#define SIG_BLOCK          0	/* for blocking signals */
+#define SIG_UNBLOCK        1	/* for unblocking signals */
+#define SIG_SETMASK        2	/* for setting the signal mask */
+
+/* Type of a signal handler.  */
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
+#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
+#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+	__sighandler_t sa_handler;
+	old_sigset_t sa_mask;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+};
+
+struct sigaction {
+	__sighandler_t sa_handler;
+	unsigned long sa_flags;
+	__sigrestore_t sa_restorer;
+	sigset_t sa_mask;		/* mask last for extensibility */
+};
+
+struct k_sigaction {
+	struct sigaction sa;
+};
+#else
+/* Here we must cater to libcs that poke about in kernel headers.  */
+
+struct sigaction {
+	union {
+	  __sighandler_t _sa_handler;
+	  void (*_sa_sigaction)(int, struct siginfo *, void *);
+	} _u;
+	sigset_t sa_mask;
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+};
+
+#define sa_handler	_u._sa_handler
+#define sa_sigaction	_u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+
+#define __HAVE_ARCH_SIG_BITOPS
+
+static __inline__ void sigaddset(sigset_t *set, int _sig)
+{
+	__asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static __inline__ void sigdelset(sigset_t *set, int _sig)
+{
+	__asm__("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static __inline__ int __const_sigismember(sigset_t *set, int _sig)
+{
+	unsigned long sig = _sig - 1;
+	return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
+}
+
+static __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+{
+	int ret;
+	__asm__("btl %2,%1\n\tsbbl %0,%0"
+		: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+	return ret;
+}
+
+#define sigismember(set,sig)			\
+	(__builtin_constant_p(sig) ?		\
+	 __const_sigismember((set),(sig)) :	\
+	 __gen_sigismember((set),(sig)))
+
+static __inline__ int sigfindinword(unsigned long word)
+{
+	__asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
+	return word;
+}
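+/*
+ * A small usage sketch (illustrative only): the bitops above take
+ * 1-based signal numbers and subtract one before indexing:
+ *
+ *	sigset_t pending = { { 0 } };
+ *
+ *	sigaddset(&pending, SIGINT);		// btsl sets bit SIGINT-1
+ *	if (sigismember(&pending, SIGINT))	// constant arg: C version
+ *		sigdelset(&pending, SIGINT);	// btrl clears it again
+ */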
+
+struct pt_regs;
+extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
+#define ptrace_signal_deliver(regs, cookie)		\
+	do {						\
+		if (current->ptrace & PT_DTRACE) {	\
+			current->ptrace &= ~PT_DTRACE;	\
+			(regs)->eflags &= ~TF_MASK;	\
+		}					\
+	} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
new file mode 100644
index 0000000..dd14912
--- /dev/null
+++ b/include/asm-i386/smp.h
@@ -0,0 +1,90 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/bitops.h>
+#include <asm/mpspec.h>
+#ifdef CONFIG_X86_IO_APIC
+#include <asm/io_apic.h>
+#endif
+#include <asm/apic.h>
+#endif
+#endif
+
+#define BAD_APICID 0xFFu
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+/*
+ * Private routines/data
+ */
+ 
+extern void smp_alloc_memory(void);
+extern int pic_mode;
+extern int smp_num_siblings;
+extern cpumask_t cpu_sibling_map[];
+
+extern void smp_flush_tlb(void);
+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+extern void smp_invalidate_rcv(void);		/* Process an NMI */
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
+
+#define MAX_APICID 256
+extern u8 x86_cpu_to_apicid[];
+
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+#define __smp_processor_id() (current_thread_info()->cpu)
+
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
+#define cpu_possible_map cpu_callout_map
+
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+#else
+#include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+}
+#endif
+
+static __inline int logical_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+
+#endif
+#endif /* !__ASSEMBLY__ */
+
+#define NO_PROC_ID		0xFF		/* No processor magic marker */
+
+#endif
+#endif
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
new file mode 100644
index 0000000..07f6b38
--- /dev/null
+++ b/include/asm-i386/socket.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET	1
+
+#define SO_DEBUG	1
+#define SO_REUSEADDR	2
+#define SO_TYPE		3
+#define SO_ERROR	4
+#define SO_DONTROUTE	5
+#define SO_BROADCAST	6
+#define SO_SNDBUF	7
+#define SO_RCVBUF	8
+#define SO_KEEPALIVE	9
+#define SO_OOBINLINE	10
+#define SO_NO_CHECK	11
+#define SO_PRIORITY	12
+#define SO_LINGER	13
+#define SO_BSDCOMPAT	14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_PASSCRED	16
+#define SO_PEERCRED	17
+#define SO_RCVLOWAT	18
+#define SO_SNDLOWAT	19
+#define SO_RCVTIMEO	20
+#define SO_SNDTIMEO	21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
+#define SO_SECURITY_ENCRYPTION_NETWORK		24
+
+#define SO_BINDTODEVICE	25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER        26
+#define SO_DETACH_FILTER        27
+
+#define SO_PEERNAME		28
+#define SO_TIMESTAMP		29
+#define SCM_TIMESTAMP		SO_TIMESTAMP
+
+#define SO_ACCEPTCONN		30
+
+#define SO_PEERSEC		31
+
+#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-i386/sockios.h b/include/asm-i386/sockios.h
new file mode 100644
index 0000000..6b747f8
--- /dev/null
+++ b/include/asm-i386/sockios.h
@@ -0,0 +1,12 @@
+#ifndef __ARCH_I386_SOCKIOS__
+#define __ARCH_I386_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 	0x8901
+#define SIOCSPGRP	0x8902
+#define FIOGETOWN	0x8903
+#define SIOCGPGRP	0x8904
+#define SIOCATMARK	0x8905
+#define SIOCGSTAMP	0x8906		/* Get stamp */
+
+#endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
new file mode 100644
index 0000000..f9ff31f
--- /dev/null
+++ b/include/asm-i386/spinlock.h
@@ -0,0 +1,250 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+#include <linux/config.h>
+#include <linux/compiler.h>
+
+asmlinkage int printk(const char * fmt, ...)
+	__attribute__ ((format (printf, 1, 2)));
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+	volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC	0xdead4ead
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
+#else
+#define SPINLOCK_MAGIC_INIT	/* */
+#endif
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
+
+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"jns 3f\n" \
+	"2:\t" \
+	"rep;nop\n\t" \
+	"cmpb $0,%0\n\t" \
+	"jle 2b\n\t" \
+	"jmp 1b\n" \
+	"3:\n\t"
+
+#define spin_lock_string_flags \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"jns 4f\n\t" \
+	"2:\t" \
+	"testl $0x200, %1\n\t" \
+	"jz 3f\n\t" \
+	"sti\n\t" \
+	"3:\t" \
+	"rep;nop\n\t" \
+	"cmpb $0, %0\n\t" \
+	"jle 3b\n\t" \
+	"cli\n\t" \
+	"jmp 1b\n" \
+	"4:\n\t"
+
+/*
+ * This works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE)
+ * (PPro errata 66, 92)
+ */
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define spin_unlock_string \
+	"movb $1,%0" \
+		:"=m" (lock->slock) : : "memory"
+
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#else
+
+#define spin_unlock_string \
+	"xchgb %b0, %1" \
+		:"=q" (oldval), "=m" (lock->slock) \
+		:"0" (oldval) : "memory"
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+	char oldval = 1;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#endif
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	char oldval;
+	__asm__ __volatile__(
+		"xchgb %b0,%1"
+		:"=q" (oldval), "=m" (lock->slock)
+		:"0" (0) : "memory");
+	return oldval > 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("eip: %p\n", __builtin_return_address(0));
+		BUG();
+	}
+#endif
+	__asm__ __volatile__(
+		spin_lock_string
+		:"=m" (lock->slock) : : "memory");
+}
+
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("eip: %p\n", __builtin_return_address(0));
+		BUG();
+	}
+#endif
+	__asm__ __volatile__(
+		spin_lock_string_flags
+		:"=m" (lock->slock) : "r" (flags) : "memory");
+}
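+/*
+ * A sketch of the byte-lock protocol implemented above: slock starts
+ * at 1 (unlocked), and "lock ; decb" atomically takes it to 0 (now
+ * held) or negative (contended - spin until it turns positive again):
+ *
+ *	spinlock_t lock = SPIN_LOCK_UNLOCKED;	// slock == 1
+ *
+ *	_raw_spin_lock(&lock);			// slock: 1 -> 0
+ *	... critical section ...
+ *	_raw_spin_unlock(&lock);		// slock: 0 -> 1
+ */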
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irq-safe
+ * read-locks.
+ */
+typedef struct {
+	volatile unsigned int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+#ifdef CONFIG_PREEMPT
+	unsigned int break_lock;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC	0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
+#else
+#define RWLOCK_MAGIC_INIT	/* */
+#endif
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+
+#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
+ */
+/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+
+static inline void _raw_read_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	__build_read_lock(rw, "__read_lock_failed");
+}
+
+static inline void _raw_write_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	__build_write_lock(rw, "__write_lock_failed");
+}
+
+#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+
+static inline int _raw_read_trylock(rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
+
+static inline int _raw_write_trylock(rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
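+/*
+ * Worked example of the biased counter (RW_LOCK_BIAS is 0x01000000 in
+ * asm/rwlock.h): each reader subtracts 1, a writer subtracts the whole
+ * bias, so the counter value identifies the lock state:
+ *
+ *	unlocked	lock == RW_LOCK_BIAS
+ *	two readers	lock == RW_LOCK_BIAS - 2
+ *	one writer	lock == 0
+ *
+ * _raw_write_trylock() therefore succeeds only when its subtraction
+ * hits exactly zero, i.e. no readers and no writer held the lock.
+ */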
+#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-i386/srat.h b/include/asm-i386/srat.h
new file mode 100644
index 0000000..165ab4b
--- /dev/null
+++ b/include/asm-i386/srat.h
@@ -0,0 +1,37 @@
+/*
+ * Some of the code in this file has been gleaned from the 64 bit 
+ * discontigmem support code base.
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to Pat Gaughen <gone@us.ibm.com>
+ */
+
+#ifndef _ASM_SRAT_H_
+#define _ASM_SRAT_H_
+
+#ifndef CONFIG_ACPI_SRAT
+#error CONFIG_ACPI_SRAT not defined, and srat.h header has been included
+#endif
+
+extern int get_memcfg_from_srat(void);
+extern unsigned long *get_zholes_size(int);
+
+#endif /* _ASM_SRAT_H_ */
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h
new file mode 100644
index 0000000..b464f80
--- /dev/null
+++ b/include/asm-i386/stat.h
@@ -0,0 +1,78 @@
+#ifndef _I386_STAT_H
+#define _I386_STAT_H
+
+struct __old_kernel_stat {
+	unsigned short st_dev;
+	unsigned short st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned short st_rdev;
+	unsigned long  st_size;
+	unsigned long  st_atime;
+	unsigned long  st_mtime;
+	unsigned long  st_ctime;
+};
+
+struct stat {
+	unsigned long  st_dev;
+	unsigned long  st_ino;
+	unsigned short st_mode;
+	unsigned short st_nlink;
+	unsigned short st_uid;
+	unsigned short st_gid;
+	unsigned long  st_rdev;
+	unsigned long  st_size;
+	unsigned long  st_blksize;
+	unsigned long  st_blocks;
+	unsigned long  st_atime;
+	unsigned long  st_atime_nsec;
+	unsigned long  st_mtime;
+	unsigned long  st_mtime_nsec;
+	unsigned long  st_ctime;
+	unsigned long  st_ctime_nsec;
+	unsigned long  __unused4;
+	unsigned long  __unused5;
+};
+
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+	unsigned long long	st_dev;
+	unsigned char	__pad0[4];
+
+#define STAT64_HAS_BROKEN_ST_INO	1
+	unsigned long	__st_ino;
+
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	unsigned long	st_uid;
+	unsigned long	st_gid;
+
+	unsigned long long	st_rdev;
+	unsigned char	__pad3[4];
+
+	long long	st_size;
+	unsigned long	st_blksize;
+
+	unsigned long	st_blocks;	/* Number of 512-byte blocks allocated. */
+	unsigned long	__pad4;		/* future possible st_blocks high bits */
+
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+
+	unsigned long	st_mtime;
+	unsigned int	st_mtime_nsec;
+
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;
+
+	unsigned long long	st_ino;
+};
+
+#define STAT_HAVE_NSEC 1
+
+#endif
diff --git a/include/asm-i386/statfs.h b/include/asm-i386/statfs.h
new file mode 100644
index 0000000..24972c1
--- /dev/null
+++ b/include/asm-i386/statfs.h
@@ -0,0 +1,6 @@
+#ifndef _I386_STATFS_H
+#define _I386_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
new file mode 100644
index 0000000..1679983
--- /dev/null
+++ b/include/asm-i386/string.h
@@ -0,0 +1,449 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ */
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, which should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strsep,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used throughout, making for "slightly" unclear code :-)
+ *
+ *		NO Copyright (C) 1991, 1992 Linus Torvalds,
+ *		consider these trivial functions to be PD.
+ */
+
+/* AK: in fact I bet it would be better to move this stuff all out of line.
+ */
+
+#define __HAVE_ARCH_STRCPY
+static inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2)
+	:"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"1:\tdecl %2\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"rep\n\t"
+	"stosb\n"
+	"2:"
+	: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+	:"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+static inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n"
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+static inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n\t"
+	"movl %8,%3\n"
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %2,%2\n\t"
+	"stosb"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
+	: "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+	"1:\tlodsb\n\t"
+	"scasb\n\t"
+	"jne 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"xorl %%eax,%%eax\n\t"
+	"jmp 3f\n"
+	"2:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"3:"
+	:"=a" (__res), "=&S" (d0), "=&D" (d1)
+		     :"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"scasb\n\t"
+	"jne 3f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %%eax,%%eax\n\t"
+	"jmp 4f\n"
+	"3:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"4:"
+		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+		     :"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+static inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"je 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"movl $1,%1\n"
+	"2:\tmovl %1,%0\n\t"
+	"decl %0"
+	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+static inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"jne 2f\n\t"
+	"leal -1(%%esi),%0\n"
+	"2:\ttestb %%al,%%al\n\t"
+	"jne 1b"
+	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+static inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"notl %0\n\t"
+	"decl %0"
+	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
+return __res;
+}
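+/*
+ * A worked note on the strlen() trick above: %ecx starts at 0xffffffff
+ * and "repne scasb" decrements it once per byte scanned, including the
+ * terminating NUL, leaving ecx == -(len + 2).  Then:
+ *
+ *	notl %0		// ~(-(len + 2)) == len + 1
+ *	decl %0		// len
+ */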
+
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"rep ; movsl\n\t"
+	"testb $2,%b4\n\t"
+	"je 1f\n\t"
+	"movsw\n"
+	"1:\ttestb $1,%b4\n\t"
+	"je 2f\n\t"
+	"movsb\n"
+	"2:"
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+	: "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+	if (n <= 128)
+		return __builtin_memcpy(to, from, n);
+
+#define COMMON(x) \
+__asm__ __volatile__( \
+	"rep ; movsl" \
+	x \
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+	: "0" (n/4),"1" ((long) to),"2" ((long) from) \
+	: "memory");
+{
+	int d0, d1, d2;
+	switch (n % 4) {
+		case 0: COMMON(""); return to;
+		case 1: COMMON("\n\tmovsb"); return to;
+		case 2: COMMON("\n\tmovsw"); return to;
+		default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+	}
+}
+  
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+/*
+ *	This CPU favours 3DNow! strongly (e.g. the AMD Athlon)
+ */
+
+static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
+{
+	if (len < 512)
+		return __constant_memcpy(to, from, len);
+	return _mmx_memcpy(to, from, len);
+}
+
+static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
+{
+	if (len < 512)
+		return __memcpy(to, from, len);
+	return _mmx_memcpy(to, from, len);
+}
+
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy3d((t),(f),(n)) : \
+ __memcpy3d((t),(f),(n)))
+
+#else
+
+/*
+ *	No 3D Now!
+ */
+ 
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#endif
+
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void * dest,const void * src, size_t n);
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+static inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+	return NULL;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"je 1f\n\t"
+	"movl $1,%0\n"
+	"1:\tdecl %0"
+	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+static inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"rep\n\t"
+	"stosb"
+	: "=&c" (d0), "=&D" (d1)
+	:"a" (c),"1" (s),"0" (count)
+	:"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"rep ; stosl\n\t"
+	"testb $2,%b3\n\t"
+	"je 1f\n\t"
+	"stosw\n"
+	"1:\ttestb $1,%b3\n\t"
+	"je 2f\n\t"
+	"stosb\n"
+	"2:"
+	: "=&c" (d0), "=&D" (d1)
+	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+	:"memory");
+return (s);	
+}
+
+/* Added by Gertjan van Wingerde to make the minix and sysv modules work */
+#define __HAVE_ARCH_STRNLEN
+static inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"movl %2,%0\n\t"
+	"jmp 2f\n"
+	"1:\tcmpb $0,(%0)\n\t"
+	"je 3f\n\t"
+	"incl %0\n"
+	"2:\tdecl %1\n\t"
+	"cmpl $-1,%1\n\t"
+	"jne 1b\n"
+	"3:\tsubl %2,%0"
+	:"=a" (__res), "=&d" (d0)
+	:"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+#define __HAVE_ARCH_STRSTR
+
+extern char *strstr(const char *cs, const char *ct);
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as by now we know that both pattern and count are constant.
+ */
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+	switch (count) {
+		case 0:
+			return s;
+		case 1:
+			*(unsigned char *)s = pattern;
+			return s;
+		case 2:
+			*(unsigned short *)s = pattern;
+			return s;
+		case 3:
+			*(unsigned short *)s = pattern;
+			*(2+(unsigned char *)s) = pattern;
+			return s;
+		case 4:
+			*(unsigned long *)s = pattern;
+			return s;
+	}
+#define COMMON(x) \
+__asm__  __volatile__( \
+	"rep ; stosl" \
+	x \
+	: "=&c" (d0), "=&D" (d1) \
+	: "a" (pattern),"0" (count/4),"1" ((long) s) \
+	: "memory")
+{
+	int d0, d1;
+	switch (count % 4) {
+		case 0: COMMON(""); return s;
+		case 1: COMMON("\n\tstosb"); return s;
+		case 2: COMMON("\n\tstosw"); return s;
+		default: COMMON("\n\tstosw\n\tstosb"); return s;
+	}
+}
+  
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
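+/*
+ * The multiply above replicates the fill byte into all four byte lanes
+ * of a long, e.g. for memset(p, 0xab, n):
+ *
+ *	0x01010101UL * 0xab == 0xabababab
+ *
+ * so the "rep ; stosl" paths can store 32 bits per iteration.
+ */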
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+static inline void * memscan(void * addr, int c, size_t size)
+{
+	if (!size)
+		return addr;
+	__asm__("repnz; scasb\n\t"
+		"jnz 1f\n\t"
+		"dec %%edi\n"
+		"1:"
+		: "=D" (addr), "=c" (size)
+		: "0" (addr), "1" (size), "a" (c));
+	return addr;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
new file mode 100644
index 0000000..dfc1114
--- /dev/null
+++ b/include/asm-i386/suspend.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
+ * Based on code
+ * Copyright 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#include <asm/desc.h>
+#include <asm/i387.h>
+
+static inline int
+arch_prepare_suspend(void)
+{
+	/* If you want to make a non-PSE machine work, turn off paging
+	   in swsusp_arch_suspend. swsusp_pg_dir should have an identity
+	   mapping, so it could work...  */
+	if (!cpu_has_pse) {
+		printk(KERN_ERR "PSE is required for swsusp.\n");
+		return -EPERM;
+	}
+	return 0;
+}
+
+/* image of the saved processor state */
+struct saved_context {
+  	u16 es, fs, gs, ss;
+	unsigned long cr0, cr2, cr3, cr4;
+	u16 gdt_pad;
+	u16 gdt_limit;
+	unsigned long gdt_base;
+	u16 idt_pad;
+	u16 idt_limit;
+	unsigned long idt_base;
+	u16 ldt;
+	u16 tss;
+	unsigned long tr;
+	unsigned long safety;
+	unsigned long return_address;
+} __attribute__((packed));
+
+#define loaddebug(thread,register) \
+               __asm__("movl %0,%%db" #register  \
+                       : /* no output */ \
+                       :"r" ((thread)->debugreg[register]))
+
+#ifdef CONFIG_ACPI_SLEEP
+extern unsigned long saved_eip;
+extern unsigned long saved_esp;
+extern unsigned long saved_ebp;
+extern unsigned long saved_ebx;
+extern unsigned long saved_esi;
+extern unsigned long saved_edi;
+
+static inline void acpi_save_register_state(unsigned long return_point)
+{
+	saved_eip = return_point;
+	asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
+	asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
+	asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
+	asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
+	asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
+}
+
+#define acpi_restore_register_state()  do {} while (0)
+
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+#endif
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
new file mode 100644
index 0000000..6f74d4c
--- /dev/null
+++ b/include/asm-i386/system.h
@@ -0,0 +1,473 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <linux/bitops.h> /* for LOCK_PREFIX */
+
+#ifdef __KERNEL__
+
+struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+#define switch_to(prev,next,last) do {					\
+	unsigned long esi,edi;						\
+	asm volatile("pushfl\n\t"					\
+		     "pushl %%ebp\n\t"					\
+		     "movl %%esp,%0\n\t"	/* save ESP */		\
+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
+		     "movl $1f,%1\n\t"		/* save EIP */		\
+		     "pushl %6\n\t"		/* restore EIP */	\
+		     "jmp __switch_to\n"				\
+		     "1:\t"						\
+		     "popl %%ebp\n\t"					\
+		     "popfl"						\
+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
+		      "=a" (last),"=S" (esi),"=D" (edi)			\
+		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
+		      "2" (prev), "d" (next));				\
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+         "0" (base) \
+        ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+        ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+	unsigned long __base;
+	__asm__("movb %3,%%dh\n\t"
+		"movb %2,%%dl\n\t"
+		"shll $16,%%edx\n\t"
+		"movw %1,%%dx"
+		:"=&d" (__base)
+		:"m" (*((addr)+2)),
+		 "m" (*((addr)+4)),
+		 "m" (*((addr)+7)));
+	return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value)			\
+	asm volatile("\n"			\
+		"1:\t"				\
+		"movl %0,%%" #seg "\n"		\
+		"2:\n"				\
+		".section .fixup,\"ax\"\n"	\
+		"3:\t"				\
+		"pushl $0\n\t"			\
+		"popl %%" #seg "\n\t"		\
+		"jmp 2b\n"			\
+		".previous\n"			\
+		".section __ex_table,\"a\"\n\t"	\
+		".align 4\n\t"			\
+		".long 1b,3b\n"			\
+		".previous"			\
+		: :"m" (*(unsigned int *)&(value)))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define read_cr0() ({ \
+	unsigned int __dummy; \
+	__asm__( \
+		"movl %%cr0,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr0(x) \
+	__asm__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr4() ({ \
+	unsigned int __dummy; \
+	__asm__( \
+		"movl %%cr4,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr4(x) \
+	__asm__("movl %0,%%cr4": :"r" (x));
+#define stts() write_cr0(8 | read_cr0())
+
+#endif	/* __KERNEL__ */
+
+#define wbinvd() \
+	__asm__ __volatile__ ("wbinvd": : :"memory");
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange; this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases; the cached
+ * cost is around 38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+		unsigned int low, unsigned int high)
+{
+	__asm__ __volatile__ (
+		"\n1:\t"
+		"movl (%0), %%eax\n\t"
+		"movl 4(%0), %%edx\n\t"
+		"lock cmpxchg8b (%0)\n\t"
+		"jnz 1b"
+		: /* no outputs */
+		:	"D"(ptr),
+			"b"(low),
+			"c"(high)
+		:	"ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+						 unsigned long long value)
+{
+	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x)	*(((unsigned int*)&(x))+0)
+#define ll_high(x)	*(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+			 unsigned long long value)
+{
+	__set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
+ *	  but generally the primitive is invalid; *ptr is an output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+		case 1:
+			__asm__ __volatile__("xchgb %b0,%1"
+				:"=q" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 2:
+			__asm__ __volatile__("xchgw %w0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 4:
+			__asm__ __volatile__("xchgl %0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+	}
+	return x;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+    
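+/*
+ * A minimal usage sketch (the helper is hypothetical): the classic
+ * compare-and-swap retry loop built on cmpxchg():
+ *
+ *	static inline void hypo_add(volatile unsigned long *p,
+ *				    unsigned long v)
+ *	{
+ *		unsigned long old;
+ *
+ *		do {
+ *			old = *p;
+ *		} while (cmpxchg(p, old, old + v) != old);
+ *	}
+ */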
+#ifdef __KERNEL__
+struct alt_instr { 
+	__u8 *instr; 		/* original instruction */
+	__u8 *replacement;
+	__u8  cpuid;		/* cpuid bit set for replacement */
+	__u8  instrlen;		/* length of original instruction */
+	__u8  replacementlen; 	/* length of new instruction, <= instrlen */ 
+	__u8  pad;
+}; 
+#endif
+
+/* 
+ * Alternative instructions for different CPU types or capabilities.
+ * 
+ * This allows the use of optimized instructions even on generic
+ * binary kernels.
+ * 
+ * The length of oldinstr must be greater than or equal to the length
+ * of newinstr; it can be padded with nops as needed.
+ * 
+ * For non-barrier-like inlines, please define new variants without
+ * the volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) 	\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n" 		     \
+		      ".section .altinstructions,\"a\"\n"     	     \
+		      "  .align 4\n"				       \
+		      "  .long 661b\n"            /* label */          \
+		      "  .long 663f\n"		  /* new instruction */ 	\
+		      "  .byte %c0\n"             /* feature bit */    \
+		      "  .byte 662b-661b\n"       /* sourcelen */      \
+		      "  .byte 664f-663f\n"       /* replacementlen */ \
+		      ".previous\n"						\
+		      ".section .altinstr_replacement,\"ax\"\n"			\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
+		      ".previous" :: "i" (feature) : "memory")  
+
+/*
+ * Alternative inline assembly with input.
+ * 
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * It is best to use constraints that are fixed-size (like (%1) ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...)		\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n"				\
+		      ".section .altinstructions,\"a\"\n"			\
+		      "  .align 4\n"						\
+		      "  .long 661b\n"            /* label */			\
+		      "  .long 663f\n"		  /* new instruction */ 	\
+		      "  .byte %c0\n"             /* feature bit */		\
+		      "  .byte 662b-661b\n"       /* sourcelen */		\
+		      "  .byte 664f-663f\n"       /* replacementlen */ 		\
+		      ".previous\n"						\
+		      ".section .altinstr_replacement,\"ax\"\n"			\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ 	\
+		      ".previous" :: "i" (feature), ##input)
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPUs follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPUs to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
+ * no-op for these.
+ */
+ 
+
+/* 
+ * Actually, only lfence would be needed for mb(), because all stores done
+ * by the kernel should already be ordered. But keep a full barrier for now.
+ */
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this, where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while(0)
+
+#ifdef CONFIG_X86_OOSTORE
+/* Actually there are no OOO-store-capable CPUs for now that do SSE,
+   but make it a possibility already. */
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define wmb()	__asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while(0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
+#define local_irq_restore(x) 	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
+#define local_irq_disable() 	__asm__ __volatile__("cli": : :"memory")
+#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
+
+#define irqs_disabled()			\
+({					\
+	unsigned long flags;		\
+	local_save_flags(flags);	\
+	!(flags & (1<<9));	/* IF flag is EFLAGS bit 9 */ \
+})
+
+/* For spinlocks etc */
+#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h
new file mode 100644
index 0000000..72c10e3
--- /dev/null
+++ b/include/asm-i386/termbits.h
@@ -0,0 +1,173 @@
+#ifndef __ARCH_I386_TERMBITS_H__
+#define __ARCH_I386_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IUCLC	0001000
+#define IXON	0002000
+#define IXANY	0004000
+#define IXOFF	0010000
+#define IMAXBEL	0020000
+#define IUTF8	0040000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define OLCUC	0000002
+#define ONLCR	0000004
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+#define OFILL	0000100
+#define OFDEL	0000200
+#define NLDLY	0000400
+#define   NL0	0000000
+#define   NL1	0000400
+#define CRDLY	0003000
+#define   CR0	0000000
+#define   CR1	0001000
+#define   CR2	0002000
+#define   CR3	0003000
+#define TABDLY	0014000
+#define   TAB0	0000000
+#define   TAB1	0004000
+#define   TAB2	0010000
+#define   TAB3	0014000
+#define   XTABS	0014000
+#define BSDLY	0020000
+#define   BS0	0000000
+#define   BS1	0020000
+#define VTDLY	0040000
+#define   VT0	0000000
+#define   VT1	0040000
+#define FFDLY	0100000
+#define   FF0	0000000
+#define   FF1	0100000
+
+/* c_cflag bit meaning */
+#define CBAUD	0010017
+#define  B0	0000000		/* hang up */
+#define  B50	0000001
+#define  B75	0000002
+#define  B110	0000003
+#define  B134	0000004
+#define  B150	0000005
+#define  B200	0000006
+#define  B300	0000007
+#define  B600	0000010
+#define  B1200	0000011
+#define  B1800	0000012
+#define  B2400	0000013
+#define  B4800	0000014
+#define  B9600	0000015
+#define  B19200	0000016
+#define  B38400	0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE	0000060
+#define   CS5	0000000
+#define   CS6	0000020
+#define   CS7	0000040
+#define   CS8	0000060
+#define CSTOPB	0000100
+#define CREAD	0000200
+#define PARENB	0000400
+#define PARODD	0001000
+#define HUPCL	0002000
+#define CLOCAL	0004000
+#define CBAUDEX 0010000
+#define    B57600 0010001
+#define   B115200 0010002
+#define   B230400 0010003
+#define   B460800 0010004
+#define   B500000 0010005
+#define   B576000 0010006
+#define   B921600 0010007
+#define  B1000000 0010010
+#define  B1152000 0010011
+#define  B1500000 0010012
+#define  B2000000 0010013
+#define  B2500000 0010014
+#define  B3000000 0010015
+#define  B3500000 0010016
+#define  B4000000 0010017
+#define CIBAUD	  002003600000	/* input baud rate (not used) */
+#define CMSPAR	  010000000000		/* mark or space (stick) parity */
+#define CRTSCTS	  020000000000		/* flow control */
+
+/* c_lflag bits */
+#define ISIG	0000001
+#define ICANON	0000002
+#define XCASE	0000004
+#define ECHO	0000010
+#define ECHOE	0000020
+#define ECHOK	0000040
+#define ECHONL	0000100
+#define NOFLSH	0000200
+#define TOSTOP	0000400
+#define ECHOCTL	0001000
+#define ECHOPRT	0002000
+#define ECHOKE	0004000
+#define FLUSHO	0010000
+#define PENDIN	0040000
+#define IEXTEN	0100000
+
+/* tcflow() and TCXONC use these */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* tcflush() and TCFLSH use these */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* tcsetattr uses these */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
+
+#endif
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
new file mode 100644
index 0000000..03f5485
--- /dev/null
+++ b/include/asm-i386/termios.h
@@ -0,0 +1,107 @@
+#ifndef _I386_TERMIOS_H
+#define _I386_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY		0
+#define N_SLIP		1
+#define N_MOUSE		2
+#define N_PPP		3
+#define N_STRIP		4
+#define N_AX25		5
+#define N_X25		6	/* X.25 async */
+#define N_6PACK		7
+#define N_MASC		8	/* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964		9	/* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL	10	/* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA		11	/* Linux IR - http://irda.sourceforge.net/ */
+#define N_SMSBLOCK	12	/* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC		13	/* synchronous HDLC */
+#define N_SYNC_PPP	14	/* synchronous PPP */
+#define N_HCI		15  /* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+
+/*	intr=^C		quit=^\		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+	unsigned short __tmp; \
+	get_user(__tmp,&(termio)->x); \
+	*(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+	put_user((termios)->c_iflag, &(termio)->c_iflag); \
+	put_user((termios)->c_oflag, &(termio)->c_oflag); \
+	put_user((termios)->c_cflag, &(termio)->c_cflag); \
+	put_user((termios)->c_lflag, &(termio)->c_lflag); \
+	put_user((termios)->c_line,  &(termio)->c_line); \
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _I386_TERMIOS_H */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
new file mode 100644
index 0000000..2cd5727
--- /dev/null
+++ b/include/asm-i386/thread_info.h
@@ -0,0 +1,174 @@
+/* thread_info.h: i386 low-level thread information
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	unsigned long		status;		/* thread-synchronous flags */
+	__u32			cpu;		/* current CPU */
+	__s32			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+
+	mm_segment_t		addr_limit;	/* thread address space:
+					 	   0-0xBFFFFFFF for user-thread
+						   0-0xFFFFFFFF for kernel-thread
+						*/
+	struct restart_block    restart_block;
+
+	unsigned long           previous_esp;   /* ESP of the previous stack in case
+						   of nested (IRQ) stacks
+						*/
+	__u8			supervisor_stack[0];
+};
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm_offsets.h>
+
+#endif
+
+#define PREEMPT_ACTIVE		0x10000000
+#ifdef CONFIG_4KSTACKS
+#define THREAD_SIZE            (4096)
+#else
+#define THREAD_SIZE		(8192)
+#endif
+
+#define STACK_WARN             (THREAD_SIZE/8)
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.exec_domain	= &default_exec_domain,	\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.preempt_count	= 1,			\
+	.addr_limit	= KERNEL_DS,		\
+	.restart_block = {			\
+		.fn = do_no_restart_syscall,	\
+	},					\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+	struct thread_info *ti;
+	__asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+	return ti;
+}
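+
+/*
+ * Worked example (illustrative addresses): with THREAD_SIZE == 8192,
+ * ~(THREAD_SIZE - 1) == 0xffffe000, so an esp of 0xc15f3e9c masks
+ * down to 0xc15f2000 -- the base of the current stack, which is
+ * where this thread's thread_info lives.
+ */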
+
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("esp") __attribute_used__;
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define alloc_thread_info(tsk)					\
+	({							\
+		struct thread_info *ret;			\
+								\
+		ret = kmalloc(THREAD_SIZE, GFP_KERNEL);		\
+		if (ret)					\
+			memset(ret, 0, THREAD_SIZE);		\
+		ret;						\
+	})
+#else
+#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
+#endif
+
+#define free_thread_info(info)	kfree(info)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg) \
+	movl $-THREAD_SIZE, reg; \
+	andl %esp, reg
+
+/* use this one if reg already contains %esp */
+#define GET_THREAD_INFO_WITH_ESP(reg) \
+	andl $-THREAD_SIZE, reg
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
+#define TIF_IRET		5	/* return with iret */
+#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+#define TIF_SECCOMP		8	/* secure computing */
+#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE		17
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_IRET		(1<<TIF_IRET)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK \
+  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
new file mode 100644
index 0000000..40c54f6
--- /dev/null
+++ b/include/asm-i386/timer.h
@@ -0,0 +1,64 @@
+#ifndef _ASMi386_TIMER_H
+#define _ASMi386_TIMER_H
+#include <linux/init.h>
+
+/**
+ * struct timer_opts - used to define a timer source
+ *
+ * @name: name of the timer.
+ * @init: Probes and initializes the timer. Takes clock= override 
+ *        string as an argument. Returns 0 on success, anything else
+ *        on failure.
+ * @mark_offset: called by the timer interrupt.
+ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
+ *               since the last timer interrupt.
+ * @monotonic_clock: returns the number of nanoseconds since the init of the
+ *                   timer.
+ * @delay: delays this many clock cycles.
+ */
+struct timer_opts {
+	char* name;
+	void (*mark_offset)(void);
+	unsigned long (*get_offset)(void);
+	unsigned long long (*monotonic_clock)(void);
+	void (*delay)(unsigned long);
+};
+
+struct init_timer_opts {
+	int (*init)(char *override);
+	struct timer_opts *opts;
+};
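+
+/*
+ * Sketch of a timer source wired into this interface (hypothetical
+ * "dummy" names, for illustration only):
+ *
+ *	static int dummy_init(char *override) { return 0; }
+ *	static void dummy_mark_offset(void) { }
+ *	static unsigned long dummy_get_offset(void) { return 0; }
+ *	static unsigned long long dummy_monotonic_clock(void) { return 0; }
+ *	static void dummy_delay(unsigned long loops) { while (loops--) ; }
+ *
+ *	static struct timer_opts timer_dummy = {
+ *		.name		 = "dummy",
+ *		.mark_offset	 = dummy_mark_offset,
+ *		.get_offset	 = dummy_get_offset,
+ *		.monotonic_clock = dummy_monotonic_clock,
+ *		.delay		 = dummy_delay,
+ *	};
+ *
+ *	struct init_timer_opts timer_dummy_init = {
+ *		.init = dummy_init,
+ *		.opts = &timer_dummy,
+ *	};
+ */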
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern struct timer_opts* __init select_timer(void);
+extern void clock_fallback(void);
+void setup_pit_timer(void);
+
+/* Modifiers for buggy PIT handling */
+
+extern int pit_latch_buggy;
+
+extern struct timer_opts *cur_timer;
+extern int timer_ack;
+
+/* list of externed timers */
+extern struct timer_opts timer_none;
+extern struct timer_opts timer_pit;
+extern struct init_timer_opts timer_pit_init;
+extern struct init_timer_opts timer_tsc_init;
+#ifdef CONFIG_X86_CYCLONE_TIMER
+extern struct init_timer_opts timer_cyclone_init;
+#endif
+
+extern unsigned long calibrate_tsc(void);
+extern void init_cpu_khz(void);
+#ifdef CONFIG_HPET_TIMER
+extern struct init_timer_opts timer_hpet_init;
+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern struct init_timer_opts timer_pmtmr_init;
+#endif
+#endif
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
new file mode 100644
index 0000000..b41e484
--- /dev/null
+++ b/include/asm-i386/timex.h
@@ -0,0 +1,52 @@
+/*
+ * linux/include/asm-i386/timex.h
+ *
+ * i386 architecture timex specifications
+ */
+#ifndef _ASMi386_TIMEX_H
+#define _ASMi386_TIMEX_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_X86_ELAN
+#  define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
+#else
+#  define CLOCK_TICK_RATE 1193182 /* Underlying HZ */
+#endif
+
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is. 
+ */
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+	unsigned long long ret=0;
+
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+	rdtscll(ret);
+#endif
+	return ret;
+}
+
+extern unsigned long cpu_khz;
+
+#endif
diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h
new file mode 100644
index 0000000..c006c5c
--- /dev/null
+++ b/include/asm-i386/tlb.h
@@ -0,0 +1,20 @@
+#ifndef _I386_TLB_H
+#define _I386_TLB_H
+
+/*
+ * x86 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
new file mode 100644
index 0000000..f22fab0
--- /dev/null
+++ b/include/asm-i386/tlbflush.h
@@ -0,0 +1,147 @@
+#ifndef _I386_TLBFLUSH_H
+#define _I386_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb()							\
+	do {								\
+		unsigned int tmpreg;					\
+									\
+		__asm__ __volatile__(					\
+			"movl %%cr3, %0;              \n"		\
+			"movl %0, %%cr3;  # flush TLB \n"		\
+			: "=r" (tmpreg)					\
+			:: "memory");					\
+	} while (0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global()						\
+	do {								\
+		unsigned int tmpreg;					\
+									\
+		__asm__ __volatile__(					\
+			"movl %1, %%cr4;  # turn off PGE     \n"	\
+			"movl %%cr3, %0;                     \n"	\
+			"movl %0, %%cr3;  # flush TLB        \n"	\
+			"movl %2, %%cr4;  # turn PGE back on \n"	\
+			: "=&r" (tmpreg)				\
+			: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
+			  "r" (mmu_cr4_features)			\
+			: "memory");					\
+	} while (0)
+
+extern unsigned long pgkern_mask;
+
+# define __flush_tlb_all()						\
+	do {								\
+		if (cpu_has_pge)					\
+			__flush_tlb_global();				\
+		else							\
+			__flush_tlb();					\
+	} while (0)
+
+#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
+
+#define __flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#ifdef CONFIG_X86_INVLPG
+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
+#else
+# define __flush_tlb_one(addr)						\
+	do {								\
+		if (cpu_has_invlpg)					\
+			__flush_tlb_single(addr);			\
+		else							\
+			__flush_tlb();					\
+	} while (0)
+#endif
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes' TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ..but the i386 has somewhat limited tlb flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
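+
+/*
+ * Usage sketch (illustrative): after updating a single user pte,
+ * flushing that one address suffices:
+ *
+ *	set_pte(ptep, entry);
+ *	flush_tlb_page(vma, address);
+ *
+ * while remapping a range of kernel virtual addresses would use
+ * flush_tlb_kernel_range(start, end) instead.
+ */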
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mm == current->active_mm)
+		__flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+	unsigned long addr)
+{
+	if (vma->vm_mm == current->active_mm)
+		__flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end)
+{
+	if (vma->vm_mm == current->active_mm)
+		__flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+	__flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()	flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK	1
+#define TLBSTATE_LAZY	2
+
+struct tlb_state
+{
+	struct mm_struct *active_mm;
+	int state;
+	char __cacheline_padding[L1_CACHE_BYTES-8];
+};
+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+	/* i386 does not keep any page table caches in TLB */
+}
+
+#endif /* _I386_TLBFLUSH_H */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
new file mode 100644
index 0000000..98f9e68
--- /dev/null
+++ b/include/asm-i386/topology.h
@@ -0,0 +1,108 @@
+/*
+ * linux/include/asm-i386/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.          
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_I386_TOPOLOGY_H
+#define _ASM_I386_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/mpspec.h>
+
+#include <linux/cpumask.h>
+
+/* Mappings between logical cpu number and node number */
+extern cpumask_t node_2_cpu_mask[];
+extern int cpu_2_node[];
+
+/* Returns the number of the node containing CPU 'cpu' */
+static inline int cpu_to_node(int cpu)
+{ 
+	return cpu_2_node[cpu];
+}
+
+/* Returns the number of the node containing Node 'node'.  This architecture is flat, 
+   so it is a pretty simple function! */
+#define parent_node(node) (node)
+
+/* Returns a bitmask of CPUs on Node 'node'. */
+static inline cpumask_t node_to_cpumask(int node)
+{
+	return node_2_cpu_mask[node];
+}
+
+/* Returns the number of the first CPU on Node 'node'. */
+static inline int node_to_first_cpu(int node)
+{ 
+	cpumask_t mask = node_to_cpumask(node);
+	return first_cpu(mask);
+}
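+
+/*
+ * Sketch (illustrative): walking every CPU on the node that owns
+ * CPU 0, where do_something() stands in for real per-CPU work:
+ *
+ *	int cpu, node = cpu_to_node(0);
+ *	cpumask_t mask = node_to_cpumask(node);
+ *
+ *	for_each_cpu_mask(cpu, mask)
+ *		do_something(cpu);
+ */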
+
+/* Returns the number of the node containing PCI bus number 'busnr' */
+static inline cpumask_t __pcibus_to_cpumask(int busnr)
+{
+	return node_to_cpumask(mp_bus_id_to_node[busnr]);
+}
+#define pcibus_to_cpumask(bus)	__pcibus_to_cpumask(bus->number)
+
+/* sched_domains SD_NODE_INIT for NUMAQ machines */
+#define SD_NODE_INIT (struct sched_domain) {		\
+	.span			= CPU_MASK_NONE,	\
+	.parent			= NULL,			\
+	.groups			= NULL,			\
+	.min_interval		= 8,			\
+	.max_interval		= 32,			\
+	.busy_factor		= 32,			\
+	.imbalance_pct		= 125,			\
+	.cache_hot_time		= (10*1000000),		\
+	.cache_nice_tries	= 1,			\
+	.per_cpu_gain		= 100,			\
+	.flags			= SD_LOAD_BALANCE	\
+				| SD_BALANCE_EXEC	\
+				| SD_BALANCE_NEWIDLE	\
+				| SD_WAKE_IDLE		\
+				| SD_WAKE_BALANCE,	\
+	.last_balance		= jiffies,		\
+	.balance_interval	= 1,			\
+	.nr_balance_failed	= 0,			\
+}
+
+extern unsigned long node_start_pfn[];
+extern unsigned long node_end_pfn[];
+extern unsigned long node_remap_size[];
+
+#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
+
+#else /* !CONFIG_NUMA */
+/*
+ * Other i386 platforms should define their own version of the 
+ * above macros here.
+ */
+
+#include <asm-generic/topology.h>
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_I386_TOPOLOGY_H */
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
new file mode 100644
index 0000000..901b77c
--- /dev/null
+++ b/include/asm-i386/types.h
@@ -0,0 +1,72 @@
+#ifndef _I386_TYPES_H
+#define _I386_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+/* DMA addresses come in generic and 64-bit flavours.  */
+
+#ifdef CONFIG_HIGHMEM64G
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+typedef u64 dma64_addr_t;
+
+#ifdef CONFIG_LBD
+typedef u64 sector_t;
+#define HAVE_SECTOR_T
+#endif
+
+typedef unsigned short kmem_bufctl_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
new file mode 100644
index 0000000..886867a
--- /dev/null
+++ b/include/asm-i386/uaccess.h
@@ -0,0 +1,539 @@
+#ifndef __i386_UACCESS_H
+#define __i386_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)
+#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a,b)	((a).seg == (b).seg)
+
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+	int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
+#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg))
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+ * (u33)addr + (u33)size > (u33)current->addr_limit.seg
+ *
+ * This needs 33-bit arithmetic. We have a carry...
+ */
+#define __range_ok(addr,size) ({ \
+	unsigned long flag,sum; \
+	__chk_user_ptr(addr); \
+	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
+		:"=&r" (flag), "=r" (sum) \
+		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+	flag; })
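+
+/*
+ * Plain-C sketch of the same predicate (illustrative names; the asm
+ * above is used so that the carry out of the 32-bit add is available
+ * directly).  Here a nonzero result means the range is valid, i.e.
+ * __range_ok() would yield 0:
+ *
+ *	static inline int range_ok_c(unsigned long addr,
+ *				     unsigned long size,
+ *				     unsigned long limit)
+ *	{
+ *		unsigned long sum = addr + size;
+ *		return sum >= addr && sum <= limit;
+ *	}
+ */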
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
+
+/**
+ * verify_area: - Obsolete/deprecated and will go away soon,
+ * use access_ok() instead.
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This function has been replaced by access_ok().
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns zero if the memory block may be valid, -EFAULT
+ * if it is definitely invalid.
+ *
+ * See access_ok() for more details.
+ */
+static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
+{
+	return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern void __get_user_1(void);
+extern void __get_user_2(void);
+extern void __get_user_4(void);
+
+#define __get_user_x(size,ret,x,ptr) \
+	__asm__ __volatile__("call __get_user_" #size \
+		:"=a" (ret),"=d" (x) \
+		:"0" (ptr))
+
+
+/* Careful: we have to cast the result to the type of the pointer for sign reasons */
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x,ptr)							\
+({	int __ret_gu;							\
+	unsigned long __val_gu;						\
+	__chk_user_ptr(ptr);						\
+	switch(sizeof (*(ptr))) {					\
+	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
+	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
+	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
+	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
+	}								\
+	(x) = (__typeof__(*(ptr)))__val_gu;				\
+	__ret_gu;							\
+})
+
+extern void __put_user_bad(void);
+
+/*
+ * Strange magic calling convention: pointer in %ecx,
+ * value in %eax(:%edx), return value in %eax, no clobbers.
+ */
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+
+#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
+#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
+#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
+#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr))
+#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr))
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user(x,ptr)						\
+({	int __ret_pu;						\
+	__chk_user_ptr(ptr);					\
+	switch(sizeof(*(ptr))) {				\
+	case 1: __put_user_1(x, ptr); break;			\
+	case 2: __put_user_2(x, ptr); break;			\
+	case 4: __put_user_4(x, ptr); break;			\
+	case 8: __put_user_8(x, ptr); break;			\
+	default:__put_user_X(x, ptr); break;			\
+	}							\
+	__ret_pu;						\
+})
+
+#else
+#define put_user(x,ptr)						\
+({								\
+ 	int __ret_pu;						\
+	__typeof__(*(ptr)) __pus_tmp = x;			\
+	__ret_pu=0;						\
+	if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
+				sizeof(*(ptr))) != 0))		\
+ 		__ret_pu=-EFAULT;				\
+ 	__ret_pu;						\
+ })
+
+
+#endif
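+
+/*
+ * Usage sketch (hypothetical ioctl handler): round-trip a single int
+ * through user space.  Both macros return 0 on success and -EFAULT on
+ * a bad pointer:
+ *
+ *	static int frob_ioctl(int __user *argp)
+ *	{
+ *		int val;
+ *
+ *		if (get_user(val, argp))
+ *			return -EFAULT;
+ *		val++;
+ *		return put_user(val, argp);
+ *	}
+ */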
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x,ptr) \
+  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x,ptr) \
+  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __put_user_nocheck(x,ptr,size)				\
+({								\
+	long __pu_err;						\
+	__put_user_size((x),(ptr),(size),__pu_err,-EFAULT);	\
+	__pu_err;						\
+})
+
+
+#define __put_user_u64(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	movl %%eax,0(%2)\n"			\
+		"2:	movl %%edx,4(%2)\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	movl %3,%0\n"				\
+		"	jmp 3b\n"				\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align 4\n"				\
+		"	.long 1b,4b\n"				\
+		"	.long 2b,4b\n"				\
+		".previous"					\
+		: "=r"(err)					\
+		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define __put_user_size(x,ptr,size,retval,errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break;	\
+	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \
+	case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break;	\
+	case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
+	  default: __put_user_bad();					\
+	}								\
+} while (0)
+
+#else
+
+#define __put_user_size(x,ptr,size,retval,errret)			\
+do {									\
+	__typeof__(*(ptr)) __pus_tmp = x;				\
+	retval = 0;							\
+									\
+	if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
+		retval = errret;					\
+} while (0)
+
+#endif
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	__asm__ __volatile__(						\
+		"1:	mov"itype" %"rtype"1,%2\n"			\
+		"2:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"3:	movl %3,%0\n"					\
+		"	jmp 2b\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.align 4\n"					\
+		"	.long 1b,3b\n"					\
+		".previous"						\
+		: "=r"(err)						\
+		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+
+
+#define __get_user_nocheck(x,ptr,size)				\
+({								\
+	long __gu_err;						\
+	unsigned long __gu_val;					\
+	__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x,ptr,size,retval,errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break;	\
+	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break;	\
+	case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break;	\
+	default: (x) = __get_user_bad();				\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	__asm__ __volatile__(						\
+		"1:	mov"itype" %2,%"rtype"1\n"			\
+		"2:\n"							\
+		".section .fixup,\"ax\"\n"				\
+		"3:	movl %3,%0\n"					\
+		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		"	jmp 2b\n"					\
+		".previous\n"						\
+		".section __ex_table,\"a\"\n"				\
+		"	.align 4\n"					\
+		"	.long 1b,3b\n"					\
+		".previous"						\
+		: "=r"(err), ltype (x)					\
+		: "m"(__m(addr)), "i"(errret), "0"(err))
+
+
+unsigned long __must_check __copy_to_user_ll(void __user *to,
+				const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll(void *to,
+				const void __user *from, unsigned long n);
+
+/*
+ * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
+ * we return the initial request size (1, 2 or 4), as copy_*_user should do.
+ * If a store crosses a page boundary and gets a fault, the x86 will not write
+ * anything, so this is accurate.
+ */
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+static inline unsigned long __must_check
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
+			return ret;
+		case 2:
+			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
+			return ret;
+		case 4:
+			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_to_user_ll(to, from, n);
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       might_sleep();
+       return __copy_to_user_inatomic(to, from, n);
+}
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+static inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll(to, from, n);
+}
+
+static inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       might_sleep();
+       return __copy_from_user_inatomic(to, from, n);
+}
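+
+/*
+ * Usage sketch (illustrative): validate a user buffer once with
+ * access_ok(), then use the unchecked copy for the transfer:
+ *
+ *	static long push_to_user(void __user *ubuf, const void *kbuf,
+ *				 unsigned long len)
+ *	{
+ *		if (!access_ok(VERIFY_WRITE, ubuf, len))
+ *			return -EFAULT;
+ *		if (__copy_to_user(ubuf, kbuf, len))
+ *			return -EFAULT;
+ *		return len;
+ *	}
+ */
+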
+unsigned long __must_check copy_to_user(void __user *to,
+				const void *from, unsigned long n);
+unsigned long __must_check copy_from_user(void *to,
+				const void __user *from, unsigned long n);
+long __must_check strncpy_from_user(char *dst, const char __user *src,
+				long count);
+long __must_check __strncpy_from_user(char *dst,
+				const char __user *src, long count);
+
+/**
+ * strlen_user: - Get the size of a string in user space.
+ * @str: The string to measure.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+
+long strnlen_user(const char __user *str, long n);
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
+#endif /* __i386_UACCESS_H */
diff --git a/include/asm-i386/ucontext.h b/include/asm-i386/ucontext.h
new file mode 100644
index 0000000..b0db369
--- /dev/null
+++ b/include/asm-i386/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef _ASMi386_UCONTEXT_H
+#define _ASMi386_UCONTEXT_H
+
+struct ucontext {
+	unsigned long	  uc_flags;
+	struct ucontext  *uc_link;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* !_ASMi386_UCONTEXT_H */
diff --git a/include/asm-i386/unaligned.h b/include/asm-i386/unaligned.h
new file mode 100644
index 0000000..7acd795
--- /dev/null
+++ b/include/asm-i386/unaligned.h
@@ -0,0 +1,37 @@
+#ifndef __I386_UNALIGNED_H
+#define __I386_UNALIGNED_H
+
+/*
+ * The i386 can do unaligned accesses itself. 
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ */
+
+/**
+ * get_unaligned - get value from possibly mis-aligned location
+ * @ptr: pointer to value
+ *
+ * This macro should be used for accessing values larger in size than 
+ * single bytes at locations that are expected to be improperly aligned, 
+ * e.g. retrieving a u16 value from a location not u16-aligned.
+ *
+ * Note that unaligned accesses can be very expensive on some architectures.
+ */
+#define get_unaligned(ptr) (*(ptr))
+
+/**
+ * put_unaligned - put value to a possibly mis-aligned location
+ * @val: value to place
+ * @ptr: pointer to location
+ *
+ * This macro should be used for placing values larger in size than 
+ * single bytes at locations that are expected to be improperly aligned, 
+ * e.g. writing a u16 value to a location not u16-aligned.
+ *
+ * Note that unaligned accesses can be very expensive on some architectures.
+ */
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
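+
+/*
+ * Usage sketch: pulling a u32 out of a packed (and therefore possibly
+ * misaligned) header -- illustrative struct only:
+ *
+ *	struct hdr { u8 type; u32 len; } __attribute__((packed));
+ *
+ *	static inline u32 hdr_len(const struct hdr *h)
+ *	{
+ *		return get_unaligned(&h->len);
+ *	}
+ *
+ * On i386 this compiles to a plain load; the macro exists so that the
+ * same code stays correct on architectures with stricter alignment.
+ */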
+
+#endif
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
new file mode 100644
index 0000000..61bcc1b
--- /dev/null
+++ b/include/asm-i386/unistd.h
@@ -0,0 +1,466 @@
+#ifndef _ASM_I386_UNISTD_H_
+#define _ASM_I386_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall      0
+#define __NR_exit		  1
+#define __NR_fork		  2
+#define __NR_read		  3
+#define __NR_write		  4
+#define __NR_open		  5
+#define __NR_close		  6
+#define __NR_waitpid		  7
+#define __NR_creat		  8
+#define __NR_link		  9
+#define __NR_unlink		 10
+#define __NR_execve		 11
+#define __NR_chdir		 12
+#define __NR_time		 13
+#define __NR_mknod		 14
+#define __NR_chmod		 15
+#define __NR_lchown		 16
+#define __NR_break		 17
+#define __NR_oldstat		 18
+#define __NR_lseek		 19
+#define __NR_getpid		 20
+#define __NR_mount		 21
+#define __NR_umount		 22
+#define __NR_setuid		 23
+#define __NR_getuid		 24
+#define __NR_stime		 25
+#define __NR_ptrace		 26
+#define __NR_alarm		 27
+#define __NR_oldfstat		 28
+#define __NR_pause		 29
+#define __NR_utime		 30
+#define __NR_stty		 31
+#define __NR_gtty		 32
+#define __NR_access		 33
+#define __NR_nice		 34
+#define __NR_ftime		 35
+#define __NR_sync		 36
+#define __NR_kill		 37
+#define __NR_rename		 38
+#define __NR_mkdir		 39
+#define __NR_rmdir		 40
+#define __NR_dup		 41
+#define __NR_pipe		 42
+#define __NR_times		 43
+#define __NR_prof		 44
+#define __NR_brk		 45
+#define __NR_setgid		 46
+#define __NR_getgid		 47
+#define __NR_signal		 48
+#define __NR_geteuid		 49
+#define __NR_getegid		 50
+#define __NR_acct		 51
+#define __NR_umount2		 52
+#define __NR_lock		 53
+#define __NR_ioctl		 54
+#define __NR_fcntl		 55
+#define __NR_mpx		 56
+#define __NR_setpgid		 57
+#define __NR_ulimit		 58
+#define __NR_oldolduname	 59
+#define __NR_umask		 60
+#define __NR_chroot		 61
+#define __NR_ustat		 62
+#define __NR_dup2		 63
+#define __NR_getppid		 64
+#define __NR_getpgrp		 65
+#define __NR_setsid		 66
+#define __NR_sigaction		 67
+#define __NR_sgetmask		 68
+#define __NR_ssetmask		 69
+#define __NR_setreuid		 70
+#define __NR_setregid		 71
+#define __NR_sigsuspend		 72
+#define __NR_sigpending		 73
+#define __NR_sethostname	 74
+#define __NR_setrlimit		 75
+#define __NR_getrlimit		 76	/* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage		 77
+#define __NR_gettimeofday	 78
+#define __NR_settimeofday	 79
+#define __NR_getgroups		 80
+#define __NR_setgroups		 81
+#define __NR_select		 82
+#define __NR_symlink		 83
+#define __NR_oldlstat		 84
+#define __NR_readlink		 85
+#define __NR_uselib		 86
+#define __NR_swapon		 87
+#define __NR_reboot		 88
+#define __NR_readdir		 89
+#define __NR_mmap		 90
+#define __NR_munmap		 91
+#define __NR_truncate		 92
+#define __NR_ftruncate		 93
+#define __NR_fchmod		 94
+#define __NR_fchown		 95
+#define __NR_getpriority	 96
+#define __NR_setpriority	 97
+#define __NR_profil		 98
+#define __NR_statfs		 99
+#define __NR_fstatfs		100
+#define __NR_ioperm		101
+#define __NR_socketcall		102
+#define __NR_syslog		103
+#define __NR_setitimer		104
+#define __NR_getitimer		105
+#define __NR_stat		106
+#define __NR_lstat		107
+#define __NR_fstat		108
+#define __NR_olduname		109
+#define __NR_iopl		110
+#define __NR_vhangup		111
+#define __NR_idle		112
+#define __NR_vm86old		113
+#define __NR_wait4		114
+#define __NR_swapoff		115
+#define __NR_sysinfo		116
+#define __NR_ipc		117
+#define __NR_fsync		118
+#define __NR_sigreturn		119
+#define __NR_clone		120
+#define __NR_setdomainname	121
+#define __NR_uname		122
+#define __NR_modify_ldt		123
+#define __NR_adjtimex		124
+#define __NR_mprotect		125
+#define __NR_sigprocmask	126
+#define __NR_create_module	127
+#define __NR_init_module	128
+#define __NR_delete_module	129
+#define __NR_get_kernel_syms	130
+#define __NR_quotactl		131
+#define __NR_getpgid		132
+#define __NR_fchdir		133
+#define __NR_bdflush		134
+#define __NR_sysfs		135
+#define __NR_personality	136
+#define __NR_afs_syscall	137 /* Syscall for Andrew File System */
+#define __NR_setfsuid		138
+#define __NR_setfsgid		139
+#define __NR__llseek		140
+#define __NR_getdents		141
+#define __NR__newselect		142
+#define __NR_flock		143
+#define __NR_msync		144
+#define __NR_readv		145
+#define __NR_writev		146
+#define __NR_getsid		147
+#define __NR_fdatasync		148
+#define __NR__sysctl		149
+#define __NR_mlock		150
+#define __NR_munlock		151
+#define __NR_mlockall		152
+#define __NR_munlockall		153
+#define __NR_sched_setparam		154
+#define __NR_sched_getparam		155
+#define __NR_sched_setscheduler		156
+#define __NR_sched_getscheduler		157
+#define __NR_sched_yield		158
+#define __NR_sched_get_priority_max	159
+#define __NR_sched_get_priority_min	160
+#define __NR_sched_rr_get_interval	161
+#define __NR_nanosleep		162
+#define __NR_mremap		163
+#define __NR_setresuid		164
+#define __NR_getresuid		165
+#define __NR_vm86		166
+#define __NR_query_module	167
+#define __NR_poll		168
+#define __NR_nfsservctl		169
+#define __NR_setresgid		170
+#define __NR_getresgid		171
+#define __NR_prctl              172
+#define __NR_rt_sigreturn	173
+#define __NR_rt_sigaction	174
+#define __NR_rt_sigprocmask	175
+#define __NR_rt_sigpending	176
+#define __NR_rt_sigtimedwait	177
+#define __NR_rt_sigqueueinfo	178
+#define __NR_rt_sigsuspend	179
+#define __NR_pread64		180
+#define __NR_pwrite64		181
+#define __NR_chown		182
+#define __NR_getcwd		183
+#define __NR_capget		184
+#define __NR_capset		185
+#define __NR_sigaltstack	186
+#define __NR_sendfile		187
+#define __NR_getpmsg		188	/* some people actually want streams */
+#define __NR_putpmsg		189	/* some people actually want streams */
+#define __NR_vfork		190
+#define __NR_ugetrlimit		191	/* SuS compliant getrlimit */
+#define __NR_mmap2		192
+#define __NR_truncate64		193
+#define __NR_ftruncate64	194
+#define __NR_stat64		195
+#define __NR_lstat64		196
+#define __NR_fstat64		197
+#define __NR_lchown32		198
+#define __NR_getuid32		199
+#define __NR_getgid32		200
+#define __NR_geteuid32		201
+#define __NR_getegid32		202
+#define __NR_setreuid32		203
+#define __NR_setregid32		204
+#define __NR_getgroups32	205
+#define __NR_setgroups32	206
+#define __NR_fchown32		207
+#define __NR_setresuid32	208
+#define __NR_getresuid32	209
+#define __NR_setresgid32	210
+#define __NR_getresgid32	211
+#define __NR_chown32		212
+#define __NR_setuid32		213
+#define __NR_setgid32		214
+#define __NR_setfsuid32		215
+#define __NR_setfsgid32		216
+#define __NR_pivot_root		217
+#define __NR_mincore		218
+#define __NR_madvise		219
+#define __NR_madvise1		219	/* delete when C lib stub is removed */
+#define __NR_getdents64		220
+#define __NR_fcntl64		221
+/* 223 is unused */
+#define __NR_gettid		224
+#define __NR_readahead		225
+#define __NR_setxattr		226
+#define __NR_lsetxattr		227
+#define __NR_fsetxattr		228
+#define __NR_getxattr		229
+#define __NR_lgetxattr		230
+#define __NR_fgetxattr		231
+#define __NR_listxattr		232
+#define __NR_llistxattr		233
+#define __NR_flistxattr		234
+#define __NR_removexattr	235
+#define __NR_lremovexattr	236
+#define __NR_fremovexattr	237
+#define __NR_tkill		238
+#define __NR_sendfile64		239
+#define __NR_futex		240
+#define __NR_sched_setaffinity	241
+#define __NR_sched_getaffinity	242
+#define __NR_set_thread_area	243
+#define __NR_get_thread_area	244
+#define __NR_io_setup		245
+#define __NR_io_destroy		246
+#define __NR_io_getevents	247
+#define __NR_io_submit		248
+#define __NR_io_cancel		249
+#define __NR_fadvise64		250
+
+#define __NR_exit_group		252
+#define __NR_lookup_dcookie	253
+#define __NR_epoll_create	254
+#define __NR_epoll_ctl		255
+#define __NR_epoll_wait		256
+#define __NR_remap_file_pages	257
+#define __NR_set_tid_address	258
+#define __NR_timer_create	259
+#define __NR_timer_settime	(__NR_timer_create+1)
+#define __NR_timer_gettime	(__NR_timer_create+2)
+#define __NR_timer_getoverrun	(__NR_timer_create+3)
+#define __NR_timer_delete	(__NR_timer_create+4)
+#define __NR_clock_settime	(__NR_timer_create+5)
+#define __NR_clock_gettime	(__NR_timer_create+6)
+#define __NR_clock_getres	(__NR_timer_create+7)
+#define __NR_clock_nanosleep	(__NR_timer_create+8)
+#define __NR_statfs64		268
+#define __NR_fstatfs64		269
+#define __NR_tgkill		270
+#define __NR_utimes		271
+#define __NR_fadvise64_64	272
+#define __NR_vserver		273
+#define __NR_mbind		274
+#define __NR_get_mempolicy	275
+#define __NR_set_mempolicy	276
+#define __NR_mq_open 		277
+#define __NR_mq_unlink		(__NR_mq_open+1)
+#define __NR_mq_timedsend	(__NR_mq_open+2)
+#define __NR_mq_timedreceive	(__NR_mq_open+3)
+#define __NR_mq_notify		(__NR_mq_open+4)
+#define __NR_mq_getsetattr	(__NR_mq_open+5)
+#define __NR_sys_kexec_load	283
+#define __NR_waitid		284
+/* #define __NR_sys_setaltroot	285 */
+#define __NR_add_key		286
+#define __NR_request_key	287
+#define __NR_keyctl		288
+
+#define NR_syscalls 289
+
+/*
+ * user-visible error numbers are in the range -1 .. -128: see
+ * <asm-i386/errno.h>
+ */
+#define __syscall_return(type, res) \
+do { \
+	if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \
+		errno = -(res); \
+		res = -1; \
+	} \
+	return (type) (res); \
+} while (0)
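
In other words: the raw "int $0x80" return value carries either a genuine
result or a negated errno.  Any value that, viewed as a signed long, falls
in the -1..-128 error range is copied (negated) into errno and the call
collapses to the C library convention of returning -1.  For example, a raw
return of -2 becomes errno = 2 (ENOENT) and a function result of -1.
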
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name)); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name),"b" ((long)(arg1))); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+		  "d" ((long)(arg3))); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+	  "d" ((long)(arg3)),"S" ((long)(arg4))); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+	  type5,arg5) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+	: "=a" (__res) \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+	  type5,arg5,type6,arg6) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
+{ \
+long __res; \
+__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \
+	: "=a" (__res) \
+	: "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
+	  "0" ((long)(arg6))); \
+__syscall_return(type,__res); \
+}
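
For illustration, a user-space program of this era could mint its own stub
for a syscall its C library did not yet wrap - gettid was the classic case.
A minimal sketch (assuming <errno.h> supplies the errno written by
__syscall_return and <sys/types.h> supplies pid_t):

	#include <errno.h>
	#include <sys/types.h>
	#include <asm/unistd.h>

	static _syscall0(pid_t, gettid)	/* expands to: pid_t gettid(void) { ... } */

	/* usage: printf("my thread id is %d\n", gettid()); */
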
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#endif
+
+#ifdef __KERNEL_SYSCALLS__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!) until an execve is executed.  This
+ * is no problem except for the stack, which is handled by not letting
+ * main() use the stack at all after fork().  Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+			unsigned long prot, unsigned long flags,
+			unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_execve(struct pt_regs regs);
+asmlinkage int sys_clone(struct pt_regs regs);
+asmlinkage int sys_fork(struct pt_regs regs);
+asmlinkage int sys_vfork(struct pt_regs regs);
+asmlinkage int sys_pipe(unsigned long __user *fildes);
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data);
+asmlinkage long sys_iopl(unsigned long unused);
+struct sigaction;
+asmlinkage long sys_rt_sigaction(int sig,
+				const struct sigaction __user *act,
+				struct sigaction __user *oact,
+				size_t sigsetsize);
+
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif
+
+#endif /* _ASM_I386_UNISTD_H_ */
diff --git a/include/asm-i386/user.h b/include/asm-i386/user.h
new file mode 100644
index 0000000..0e85d2a
--- /dev/null
+++ b/include/asm-i386/user.h
@@ -0,0 +1,121 @@
+#ifndef _I386_USER_H
+#define _I386_USER_H
+
+#include <asm/page.h>
+/* Core file format: The core file is written in such a way that gdb
+   can understand it and provide useful information to the user (under
+   linux we use the 'trad-core' bfd).  There are quite a number of
+   obstacles to being able to view the contents of the floating point
+   registers, and until these are solved you will not be able to view the
+   contents of them.  Actually, you can read in the core file and look at
+   the contents of the user struct to find out what the floating point
+   registers contain.
+
+   The actual file contents are as follows:
+
+   UPAGE: 1 page consisting of a user struct that tells gdb what is present
+   in the file.  Directly after this is a copy of the task_struct, which
+   is currently not used by gdb, but it may come in useful at some point.
+   All of the registers are stored as part of the upage.  The upage should
+   always be only one page.
+
+   DATA: The data area is stored.  We use current->end_text to
+   current->brk to pick up all of the user variables, plus any memory
+   that may have been malloced.  No attempt is made to determine if a page
+   is demand-zero or if a page is totally unused, we just cover the entire
+   range.  All of the addresses are rounded in such a way that an integral
+   number of pages is written.
+
+   STACK: We need the stack information in order to get a meaningful
+   backtrace.  We need to write the data from (esp) to
+   current->start_stack, so we round each of these off in order to be able
+   to write an integer number of pages.
+
+   The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+/*
+ * Pentium III FXSR, SSE support
+ *	Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment.  Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests.  In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ */
+
+struct user_i387_struct {
+	long	cwd;
+	long	swd;
+	long	twd;
+	long	fip;
+	long	fcs;
+	long	foo;
+	long	fos;
+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
+};
+
+struct user_fxsr_struct {
+	unsigned short	cwd;
+	unsigned short	swd;
+	unsigned short	twd;
+	unsigned short	fop;
+	long	fip;
+	long	fcs;
+	long	foo;
+	long	fos;
+	long	mxcsr;
+	long	reserved;
+	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+	long	padding[56];
+};
+
+/*
+ * This is the old layout of "struct pt_regs", and
+ * is still the layout used by user mode (the new
+ * pt_regs doesn't have all registers as the kernel
+ * doesn't use the extra segment registers)
+ */
+struct user_regs_struct {
+	long ebx, ecx, edx, esi, edi, ebp, eax;
+	unsigned short ds, __ds, es, __es;
+	unsigned short fs, __fs, gs, __gs;
+	long orig_eax, eip;
+	unsigned short cs, __cs;
+	long eflags, esp;
+	unsigned short ss, __ss;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+   this will be used by gdb to figure out where the data and stack segments
+   are within the file, and what virtual addresses to use. */
+struct user{
+/* We start with the registers, to mimic the way that "memory" is returned
+   from the ptrace(3,...) function.  */
+  struct user_regs_struct regs;		/* Where the registers are actually stored */
+/* ptrace does not yet supply these.  Someday.... */
+  int u_fpvalid;		/* True if math co-processor being used. */
+				/* Not yet used. */
+  struct user_i387_struct i387;	/* Math Co-processor registers. */
+/* The rest of this junk is to help gdb figure out what goes where */
+  unsigned long int u_tsize;	/* Text segment size (pages). */
+  unsigned long int u_dsize;	/* Data segment size (pages). */
+  unsigned long int u_ssize;	/* Stack segment size (pages). */
+  unsigned long start_code;     /* Starting virtual address of text. */
+  unsigned long start_stack;	/* Starting virtual address of stack area.
+				   This is actually the bottom of the stack,
+				   the top of the stack is always found in the
+				   esp register.  */
+  long int signal;     		/* Signal that caused the core dump. */
+  int reserved;			/* No longer used */
+  struct user_pt_regs * u_ar0;	/* Used by gdb to help find the values for */
+				/* the registers. */
+  struct user_i387_struct* u_fpstate;	/* Math Co-processor pointer. */
+  unsigned long magic;		/* To uniquely identify a core file */
+  char u_comm[32];		/* User command that was responsible */
+  int u_debugreg[8];
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _I386_USER_H */
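
As a worked example of the layout described above (assuming 4 KiB pages, so
NBPG == 4096): the user struct page occupies file offsets 0..NBPG-1, the
data segment follows for u_dsize pages, and the stack segment follows that
for u_ssize pages - hence the three-page (12288 byte) minimum.  A core-file
reader could compute the section offsets roughly like this (an illustrative
sketch, not kernel code):

	/* hypothetical helpers for locating sections in an i386 a.out core */
	#define CORE_UPAGE_OFFSET	0
	#define CORE_DATA_OFFSET	(UPAGES * NBPG)
	#define CORE_STACK_OFFSET(u)	(CORE_DATA_OFFSET + (u)->u_dsize * NBPG)
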
diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h
new file mode 100644
index 0000000..ef0c0e5
--- /dev/null
+++ b/include/asm-i386/vga.h
@@ -0,0 +1,20 @@
+/*
+ *	Access to VGA videoram
+ *
+ *	(c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+/*
+ *	On the PC, we can just recalculate addresses and then
+ *	access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
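
A minimal usage sketch (illustrative only; assumes the standard colour text
buffer at physical address 0xb8000):

	volatile char *text = (volatile char *)VGA_MAP_MEM(0xb8000);
	char c = vga_readb(text);	/* read the first character cell */
	vga_writeb(c, text);		/* and write it straight back */
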
diff --git a/include/asm-i386/vic.h b/include/asm-i386/vic.h
new file mode 100644
index 0000000..4abfcfb
--- /dev/null
+++ b/include/asm-i386/vic.h
@@ -0,0 +1,61 @@
+/* Copyright (C) 1999,2001
+ *
+ * Author: J.E.J.Bottomley@HansenPartnership.com
+ *
+ * Standard include definitions for the NCR Voyager Interrupt Controller */
+
+/* The eight CPI vectors.  To activate a CPI, you write a bit mask
+ * corresponding to the processor set to be interrupted into the
+ * relevant register.  That set of CPUs will then be interrupted with
+ * the CPI */
+static const int VIC_CPI_Registers[] =
+	{0xFC00, 0xFC01, 0xFC08, 0xFC09,
+	 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
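
For illustration (a sketch, assuming outb() from <asm/io.h>, and that these
addresses are reachable as I/O ports the way the SMP code uses them):
sending CPI number cpi to the set of processors in the bitmask cpuset
amounts to:

	outb(cpuset, VIC_CPI_Registers[cpi]);	/* interrupt that CPU set */
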
+
+#define VIC_PROC_WHO_AM_I		0xfc29
+#	define	QUAD_IDENTIFIER		0xC0
+#	define  EIGHT_SLOT_IDENTIFIER	0xE0
+#define QIC_EXTENDED_PROCESSOR_SELECT	0xFC72
+#define VIC_CPI_BASE_REGISTER		0xFC41
+#define VIC_PROCESSOR_ID		0xFC21
+#	define VIC_CPU_MASQUERADE_ENABLE 0x8
+
+#define VIC_CLAIM_REGISTER_0		0xFC38
+#define VIC_CLAIM_REGISTER_1		0xFC39
+#define VIC_REDIRECT_REGISTER_0		0xFC60
+#define VIC_REDIRECT_REGISTER_1		0xFC61
+#define VIC_PRIORITY_REGISTER		0xFC20
+
+#define VIC_PRIMARY_MC_BASE		0xFC48
+#define VIC_SECONDARY_MC_BASE		0xFC49
+
+#define QIC_PROCESSOR_ID		0xFC71
+#	define	QIC_CPUID_ENABLE	0x08
+
+#define QIC_VIC_CPI_BASE_REGISTER	0xFC79
+#define QIC_CPI_BASE_REGISTER		0xFC7A
+
+#define QIC_MASK_REGISTER0		0xFC80
+/* NOTE: these are masked high, enabled low */
+#	define QIC_PERF_TIMER		0x01
+#	define QIC_LPE			0x02
+#	define QIC_SYS_INT		0x04
+#	define QIC_CMN_INT		0x08
+/* at the moment, just enable CMN_INT, disable SYS_INT */
+#	define QIC_DEFAULT_MASK0	(~(QIC_CMN_INT /* | QIC_SYS_INT */))
+#define QIC_MASK_REGISTER1		0xFC81
+#	define QIC_BOOT_CPI_MASK	0xFE
+/* Enable CPIs 1-6 inclusive */
+#	define QIC_CPI_ENABLE		0x81
+
+#define QIC_INTERRUPT_CLEAR0		0xFC8A
+#define QIC_INTERRUPT_CLEAR1		0xFC8B
+
+/* this is where we place the CPI vectors */
+#define VIC_DEFAULT_CPI_BASE		0xC0
+/* this is where we place the QIC CPI vectors */
+#define QIC_DEFAULT_CPI_BASE		0xD0
+
+#define VIC_BOOT_INTERRUPT_MASK		0xfe
+
+extern void smp_vic_timer_interrupt(struct pt_regs *regs);
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
new file mode 100644
index 0000000..40ec82c
--- /dev/null
+++ b/include/asm-i386/vm86.h
@@ -0,0 +1,208 @@
+#ifndef _LINUX_VM86_H
+#define _LINUX_VM86_H
+
+/*
+ * I'm guessing at the VIF/VIP flag usage, but hope that this is how
+ * the Pentium uses them. Linux will return from vm86 mode when both
+ * VIF and VIP are set.
+ *
+ * On a Pentium, we could probably optimize the virtual flags directly
+ * in the eflags register instead of doing it "by hand" in vflags...
+ *
+ * Linus
+ */
+
+#define TF_MASK		0x00000100
+#define IF_MASK		0x00000200
+#define IOPL_MASK	0x00003000
+#define NT_MASK		0x00004000
+#define VM_MASK		0x00020000
+#define AC_MASK		0x00040000
+#define VIF_MASK	0x00080000	/* virtual interrupt flag */
+#define VIP_MASK	0x00100000	/* virtual interrupt pending */
+#define ID_MASK		0x00200000
+
+#define BIOSSEG		0x0f000
+
+#define CPU_086		0
+#define CPU_186		1
+#define CPU_286		2
+#define CPU_386		3
+#define CPU_486		4
+#define CPU_586		5
+
+/*
+ * Return values for the 'vm86()' system call
+ */
+#define VM86_TYPE(retval)	((retval) & 0xff)
+#define VM86_ARG(retval)	((retval) >> 8)
+
+#define VM86_SIGNAL	0	/* return due to signal */
+#define VM86_UNKNOWN	1	/* unhandled GP fault - IO-instruction or similar */
+#define VM86_INTx	2	/* int3/int x instruction (ARG = x) */
+#define VM86_STI	3	/* sti/popf/iret instruction enabled virtual interrupts */
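
A sketch of how a vm86 monitor (dosemu, say) might decode the value returned
by the vm86() call - illustrative only; emulate_int() is a hypothetical
helper:

	switch (VM86_TYPE(ret)) {
	case VM86_SIGNAL:	/* interrupted by a signal: just re-enter */
		break;
	case VM86_INTx:		/* software interrupt, vector in the arg byte */
		emulate_int(VM86_ARG(ret));
		break;
	case VM86_STI:		/* virtual interrupts were re-enabled */
	case VM86_UNKNOWN:	/* unhandled GP fault: emulate the insn */
		break;
	}
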
+
+/*
+ * Additional return values when invoking new vm86()
+ */
+#define VM86_PICRETURN	4	/* return due to pending PIC request */
+#define VM86_TRAP	6	/* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define VM86_PLUS_INSTALL_CHECK	0
+#define VM86_ENTER		1
+#define VM86_ENTER_NO_BYPASS	2
+#define	VM86_REQUEST_IRQ	3
+#define VM86_FREE_IRQ		4
+#define VM86_GET_IRQ_BITS	5
+#define VM86_GET_AND_RESET_IRQ	6
+
+/*
+ * This is the stack-layout seen by the user space program when we have
+ * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
+ * is 'kernel_vm86_regs' (see below).
+ */
+
+struct vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+	long ebx;
+	long ecx;
+	long edx;
+	long esi;
+	long edi;
+	long ebp;
+	long eax;
+	long __null_ds;
+	long __null_es;
+	long __null_fs;
+	long __null_gs;
+	long orig_eax;
+	long eip;
+	unsigned short cs, __csh;
+	long eflags;
+	long esp;
+	unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+	unsigned short es, __esh;
+	unsigned short ds, __dsh;
+	unsigned short fs, __fsh;
+	unsigned short gs, __gsh;
+};
+
+struct revectored_struct {
+	unsigned long __map[8];			/* 256 bits */
+};
+
+struct vm86_struct {
+	struct vm86_regs regs;
+	unsigned long flags;
+	unsigned long screen_bitmap;
+	unsigned long cpu_type;
+	struct revectored_struct int_revectored;
+	struct revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define VM86_SCREEN_BITMAP	0x0001
+
+struct vm86plus_info_struct {
+	unsigned long force_return_for_pic:1;
+	unsigned long vm86dbg_active:1;       /* for debugger */
+	unsigned long vm86dbg_TFpendig:1;     /* for debugger */
+	unsigned long unused:28;
+	unsigned long is_vm86pus:1;	      /* for vm86 internal use */
+	unsigned char vm86dbg_intxxtab[32];   /* for debugger */
+};
+
+struct vm86plus_struct {
+	struct vm86_regs regs;
+	unsigned long flags;
+	unsigned long screen_bitmap;
+	unsigned long cpu_type;
+	struct revectored_struct int_revectored;
+	struct revectored_struct int21_revectored;
+	struct vm86plus_info_struct vm86plus;
+};
+
+#ifdef __KERNEL__
+/*
+ * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup. For user space layout see 'struct vm86_regs' above.
+ */
+
+struct kernel_vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+	long ebx;
+	long ecx;
+	long edx;
+	long esi;
+	long edi;
+	long ebp;
+	long eax;
+	long __null_ds;
+	long __null_es;
+	long orig_eax;
+	long eip;
+	unsigned short cs, __csh;
+	long eflags;
+	long esp;
+	unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+	unsigned short es, __esh;
+	unsigned short ds, __dsh;
+	unsigned short fs, __fsh;
+	unsigned short gs, __gsh;
+};
+
+struct kernel_vm86_struct {
+	struct kernel_vm86_regs regs;
+/*
+ * the below part remains on the kernel stack while we are in VM86 mode.
+ * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
+ * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
+ * 'struct kernel_vm86_regs' with the then actual values.
+ * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
+ * in kernelspace, hence we need not reget the data from userspace.
+ */
+#define VM86_TSS_ESP0 flags
+	unsigned long flags;
+	unsigned long screen_bitmap;
+	unsigned long cpu_type;
+	struct revectored_struct int_revectored;
+	struct revectored_struct int21_revectored;
+	struct vm86plus_info_struct vm86plus;
+	struct pt_regs *regs32;   /* here we save the pointer to the old regs */
+/*
+ * The below is not part of the structure, but the stack layout continues
+ * this way. In front of 'return-eip' may be some data, depending on
+ * compilation, so we don't rely on this and save the pointer to 'oldregs'
+ * in 'regs32' above.
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
+
+	long return-eip;        from call to vm86()
+	struct pt_regs oldregs;  user space registers as saved by syscall
+ */
+};
+
+void handle_vm86_fault(struct kernel_vm86_regs *, long);
+int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-i386/voyager.h b/include/asm-i386/voyager.h
new file mode 100644
index 0000000..aaf432d
--- /dev/null
+++ b/include/asm-i386/voyager.h
@@ -0,0 +1,521 @@
+/* Copyright (C) 1999,2001
+ *
+ * Author: J.E.J.Bottomley@HansenPartnership.com
+ *
+ * Standard include definitions for the NCR Voyager system */
+
+#undef	VOYAGER_DEBUG
+#undef	VOYAGER_CAT_DEBUG
+
+#ifdef VOYAGER_DEBUG
+#define VDEBUG(x)	printk x
+#else
+#define VDEBUG(x)
+#endif
+
+/* There are three levels of Voyager machine: 3, 4 and 5.  The rule is:
+ * anything below 3435 is a Level 3, except for the 3360, which is a
+ * Level 4.  A 3435 or above is a Level 5 */
+#define VOYAGER_LEVEL5_AND_ABOVE	0x3435
+#define VOYAGER_LEVEL4			0x3360
+
+/* The L4 DINO ASIC */
+#define VOYAGER_DINO			0x43
+
+/* voyager ports in standard I/O space */
+#define VOYAGER_MC_SETUP	0x96
+
+
+#define	VOYAGER_CAT_CONFIG_PORT			0x97
+#	define VOYAGER_CAT_DESELECT		0xff
+#define VOYAGER_SSPB_RELOCATION_PORT		0x98
+
+/* Valid CAT controller commands */
+/* start instruction register cycle */
+#define VOYAGER_CAT_IRCYC			0x01
+/* start data register cycle */
+#define VOYAGER_CAT_DRCYC			0x02
+/* move to execute state */
+#define VOYAGER_CAT_RUN				0x0F
+/* end operation */
+#define VOYAGER_CAT_END				0x80
+/* hold in idle state */
+#define VOYAGER_CAT_HOLD			0x90
+/* single step an "intest" vector */
+#define VOYAGER_CAT_STEP			0xE0
+/* return cat controller to CLEMSON mode */
+#define VOYAGER_CAT_CLEMSON			0xFF
+
+/* the default cat command header */
+#define VOYAGER_CAT_HEADER			0x7F
+
+/* the range of possible CAT module ids in the system */
+#define VOYAGER_MIN_MODULE			0x10
+#define VOYAGER_MAX_MODULE			0x1f
+
+/* The voyager registers per asic */
+#define VOYAGER_ASIC_ID_REG			0x00
+#define VOYAGER_ASIC_TYPE_REG			0x01
+/* the sub address registers can be made auto incrementing on reads */
+#define VOYAGER_AUTO_INC_REG			0x02
+#	define VOYAGER_AUTO_INC			0x04
+#	define VOYAGER_NO_AUTO_INC		0xfb
+#define VOYAGER_SUBADDRDATA			0x03
+#define VOYAGER_SCANPATH			0x05
+#	define VOYAGER_CONNECT_ASIC		0x01
+#	define VOYAGER_DISCONNECT_ASIC		0xfe
+#define VOYAGER_SUBADDRLO			0x06
+#define VOYAGER_SUBADDRHI			0x07
+#define VOYAGER_SUBMODSELECT			0x08
+#define VOYAGER_SUBMODPRESENT			0x09
+
+#define VOYAGER_SUBADDR_LO			0xff
+#define VOYAGER_SUBADDR_HI			0xffff
+
+/* the maximum size of a scan path -- used to form instructions */
+#define VOYAGER_MAX_SCAN_PATH			0x100
+/* the biggest possible register size (in bytes) */
+#define VOYAGER_MAX_REG_SIZE			4
+
+/* Total number of possible modules (including submodules) */
+#define VOYAGER_MAX_MODULES			16
+/* Largest number of asics per module */
+#define VOYAGER_MAX_ASICS_PER_MODULE		7
+
+/* the CAT asic of each module is always the first one */
+#define VOYAGER_CAT_ID				0
+#define VOYAGER_PSI				0x1a
+
+/* voyager instruction operations and registers */
+#define VOYAGER_READ_CONFIG			0x1
+#define VOYAGER_WRITE_CONFIG			0x2
+#define VOYAGER_BYPASS				0xff
+
+typedef struct voyager_asic 
+{
+	__u8	asic_addr;	/* ASIC address; Level 4 */
+	__u8	asic_type;      /* ASIC type */
+	__u8	asic_id;	/* ASIC id */
+	__u8	jtag_id[4];	/* JTAG id */
+	__u8	asic_location;	/* Location within scan path; start w/ 0 */
+	__u8	bit_location;	/* Location within bit stream; start w/ 0 */
+	__u8	ireg_length;	/* Instruction register length */
+	__u16	subaddr;	/* Amount of sub address space */
+	struct voyager_asic *next;	/* Next asic in linked list */
+} voyager_asic_t;
+
+typedef struct voyager_module {
+	__u8	module_addr;		/* Module address */
+	__u8	scan_path_connected;	/* Scan path connected */
+	__u16   ee_size;		/* Size of the EEPROM */
+	__u16   num_asics;		/* Number of Asics */
+	__u16   inst_bits;		/* Instruction bits in the scan path */
+	__u16   largest_reg;		/* Largest register in the scan path */
+	__u16   smallest_reg;		/* Smallest register in the scan path */
+	voyager_asic_t   *asic;		/* First ASIC in scan path (CAT_I) */
+	struct   voyager_module *submodule;	/* Submodule pointer */ 
+	struct   voyager_module *next;		/* Next module in linked list */
+} voyager_module_t;
+
+typedef struct voyager_eeprom_hdr {
+	 __u8  module_id[4] __attribute__((packed)); 
+	 __u8  version_id __attribute__((packed));
+	 __u8  config_id __attribute__((packed)); 
+	 __u16 boundry_id __attribute__((packed));	/* boundary scan id */
+	 __u16 ee_size __attribute__((packed));		/* size of EEPROM */
+	 __u8  assembly[11] __attribute__((packed));	/* assembly # */
+	 __u8  assembly_rev __attribute__((packed));	/* assembly rev */
+	 __u8  tracer[4] __attribute__((packed));	/* tracer number */
+	 __u16 assembly_cksum __attribute__((packed));	/* asm checksum */
+	 __u16 power_consump __attribute__((packed));	/* pwr requirements */
+	 __u16 num_asics __attribute__((packed));	/* number of asics */
+	 __u16 bist_time __attribute__((packed));	/* min. bist time */
+	 __u16 err_log_offset __attribute__((packed));	/* error log offset */
+	 __u16 scan_path_offset __attribute__((packed));/* scan path offset */
+	 __u16 cct_offset __attribute__((packed));
+	 __u16 log_length __attribute__((packed));	/* length of err log */
+	 __u16 xsum_end __attribute__((packed));	/* offset to end of
+							   checksum */
+	 __u8  reserved[4] __attribute__((packed));
+	 __u8  sflag __attribute__((packed));		/* starting sentinel */
+	 __u8  part_number[13] __attribute__((packed));	/* prom part number */
+	 __u8  version[10] __attribute__((packed));	/* version number */
+	 __u8  signature[8] __attribute__((packed));
+	 __u16 eeprom_chksum __attribute__((packed));
+	 __u32  data_stamp_offset __attribute__((packed));
+	 __u8  eflag  __attribute__((packed));		 /* ending sentinel */
+} voyager_eprom_hdr_t;
+
+
+
+#define VOYAGER_EPROM_SIZE_OFFSET   ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
+#define VOYAGER_XSUM_END_OFFSET		0x2a
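
VOYAGER_EPROM_SIZE_OFFSET above is the classic null-pointer-cast idiom for a
structure member offset; with <linux/stddef.h> available it could be written
equivalently as (an illustrative alternative, not part of the patch):

	#define VOYAGER_EPROM_SIZE_OFFSET \
		((__u16)offsetof(voyager_eprom_hdr_t, ee_size))
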
+
+/* the following three definitions are for internal table layouts
+ * in the module EPROMs.  We really only care about the IDs and
+ * offsets */
+typedef struct voyager_sp_table {
+	__u8 asic_id __attribute__((packed));
+	__u8 bypass_flag __attribute__((packed));
+	__u16 asic_data_offset __attribute__((packed));
+	__u16 config_data_offset __attribute__((packed));
+} voyager_sp_table_t;
+
+typedef struct voyager_jtag_table {
+	__u8 icode[4] __attribute__((packed));
+	__u8 runbist[4] __attribute__((packed));
+	__u8 intest[4] __attribute__((packed));
+	__u8 samp_preld[4] __attribute__((packed));
+	__u8 ireg_len __attribute__((packed));
+} voyager_jtt_t;
+
+typedef struct voyager_asic_data_table {
+	__u8 jtag_id[4] __attribute__((packed));
+	__u16 length_bsr __attribute__((packed));
+	__u16 length_bist_reg __attribute__((packed));
+	__u32 bist_clk __attribute__((packed));
+	__u16 subaddr_bits __attribute__((packed));
+	__u16 seed_bits __attribute__((packed));
+	__u16 sig_bits __attribute__((packed));
+	__u16 jtag_offset __attribute__((packed));
+} voyager_at_t;
+
+/* Voyager Interrupt Controller (VIC) registers */
+
+/* Base to add to Cross Processor Interrupts (CPIs) when triggering
+ * the CPU IRQ line */
+/* register defines for the WCBICs (one per processor) */
+#define VOYAGER_WCBIC0	0x41		/* bus A node P1 processor 0 */
+#define VOYAGER_WCBIC1	0x49		/* bus A node P1 processor 1 */
+#define VOYAGER_WCBIC2	0x51		/* bus A node P2 processor 0 */
+#define VOYAGER_WCBIC3	0x59		/* bus A node P2 processor 1 */
+#define VOYAGER_WCBIC4	0x61		/* bus B node P1 processor 0 */
+#define VOYAGER_WCBIC5	0x69		/* bus B node P1 processor 1 */
+#define VOYAGER_WCBIC6	0x71		/* bus B node P2 processor 0 */
+#define VOYAGER_WCBIC7	0x79		/* bus B node P2 processor 1 */
+
+
+/* top of memory registers */
+#define VOYAGER_WCBIC_TOM_L	0x4
+#define VOYAGER_WCBIC_TOM_H	0x5
+
+/* register defines for Voyager Memory Control (VMC)
+ * these are present on L4 machines only */
+#define	VOYAGER_VMC1		0x81
+#define VOYAGER_VMC2		0x91
+#define VOYAGER_VMC3		0xa1
+#define VOYAGER_VMC4		0xb1
+
+/* VMC Ports */
+#define VOYAGER_VMC_MEMORY_SETUP	0x9
+#	define VMC_Interleaving		0x01
+#	define VMC_4Way			0x02
+#	define VMC_EvenCacheLines	0x04
+#	define VMC_HighLine		0x08
+#	define VMC_Start0_Enable	0x20
+#	define VMC_Start1_Enable	0x40
+#	define VMC_Vremap		0x80
+#define VOYAGER_VMC_BANK_DENSITY	0xa
+#	define	VMC_BANK_EMPTY		0
+#	define	VMC_BANK_4MB		1
+#	define	VMC_BANK_16MB		2
+#	define	VMC_BANK_64MB		3
+#	define	VMC_BANK0_MASK		0x03
+#	define	VMC_BANK1_MASK		0x0C
+#	define	VMC_BANK2_MASK		0x30
+#	define	VMC_BANK3_MASK		0xC0
+
+/* Magellan Memory Controller (MMC) defines - present on L5 */
+#define VOYAGER_MMC_ASIC_ID		1
+/* the two memory modules corresponding to memory cards in the system */
+#define VOYAGER_MMC_MEMORY0_MODULE	0x14
+#define VOYAGER_MMC_MEMORY1_MODULE	0x15
+/* the Magellan Memory Address (MMA) defines */
+#define VOYAGER_MMA_ASIC_ID		2
+
+/* Submodule number for the Quad Baseboard */
+#define VOYAGER_QUAD_BASEBOARD		1
+
+/* ASIC defines for the Quad Baseboard */
+#define VOYAGER_QUAD_QDATA0		1
+#define VOYAGER_QUAD_QDATA1		2
+#define VOYAGER_QUAD_QABC		3
+
+/* Useful areas in extended CMOS */
+#define VOYAGER_PROCESSOR_PRESENT_MASK	0x88a
+#define VOYAGER_MEMORY_CLICKMAP		0xa23
+#define VOYAGER_DUMP_LOCATION		0xb1a
+
+/* SUS In Control bit - used to tell SUS that we don't need to be
+ * babysat anymore */
+#define VOYAGER_SUS_IN_CONTROL_PORT	0x3ff
+#	define VOYAGER_IN_CONTROL_FLAG	0x80
+
+/* Voyager PSI defines */
+#define VOYAGER_PSI_STATUS_REG		0x08
+#	define PSI_DC_FAIL		0x01
+#	define PSI_MON			0x02
+#	define PSI_FAULT		0x04
+#	define PSI_ALARM		0x08
+#	define PSI_CURRENT		0x10
+#	define PSI_DVM			0x20
+#	define PSI_PSCFAULT		0x40
+#	define PSI_STAT_CHG		0x80
+
+#define VOYAGER_PSI_SUPPLY_REG		0x8000
+	/* read */
+#	define PSI_FAIL_DC		0x01
+#	define PSI_FAIL_AC		0x02
+#	define PSI_MON_INT		0x04
+#	define PSI_SWITCH_OFF		0x08
+#	define PSI_HX_OFF		0x10
+#	define PSI_SECURITY		0x20
+#	define PSI_CMOS_BATT_LOW	0x40
+#	define PSI_CMOS_BATT_FAIL	0x80
+	/* write */
+#	define PSI_CLR_SWITCH_OFF	0x13
+#	define PSI_CLR_HX_OFF		0x14
+#	define PSI_CLR_CMOS_BATT_FAIL	0x17
+
+#define VOYAGER_PSI_MASK		0x8001
+#	define PSI_MASK_MASK		0x10
+
+#define VOYAGER_PSI_AC_FAIL_REG		0x8004
+#define	AC_FAIL_STAT_CHANGE		0x80
+
+#define VOYAGER_PSI_GENERAL_REG		0x8007
+	/* read */
+#	define PSI_SWITCH_ON		0x01
+#	define PSI_SWITCH_ENABLED	0x02
+#	define PSI_ALARM_ENABLED	0x08
+#	define PSI_SECURE_ENABLED	0x10
+#	define PSI_COLD_RESET		0x20
+#	define PSI_COLD_START		0x80
+	/* write */
+#	define PSI_POWER_DOWN		0x10
+#	define PSI_SWITCH_DISABLE	0x01
+#	define PSI_SWITCH_ENABLE	0x11
+#	define PSI_CLEAR		0x12
+#	define PSI_ALARM_DISABLE	0x03
+#	define PSI_ALARM_ENABLE		0x13
+#	define PSI_CLEAR_COLD_RESET	0x05
+#	define PSI_SET_COLD_RESET	0x15
+#	define PSI_CLEAR_COLD_START	0x07
+#	define PSI_SET_COLD_START	0x17
+
+
+
+struct voyager_bios_info {
+	__u8	len;
+	__u8	major;
+	__u8	minor;
+	__u8	debug;
+	__u8	num_classes;
+	__u8	class_1;
+	__u8	class_2;
+};
+
+/* The following structures and definitions are for the Kernel/SUS
+ * interface; these are needed to find out how SUS initialised any Quad
+ * boards in the system */
+
+#define	NUMBER_OF_MC_BUSSES	2
+#define SLOTS_PER_MC_BUS	8
+#define MAX_CPUS                16      /* 16 way CPU system */
+#define MAX_PROCESSOR_BOARDS	4	/* 4 processor slot system */
+#define MAX_CACHE_LEVELS	4	/* # of cache levels supported */
+#define MAX_SHARED_CPUS		4	/* # of CPUs that can share a LARC */
+#define NUMBER_OF_POS_REGS	8
+
+typedef struct {
+	__u8	MC_Slot __attribute__((packed));
+	__u8	POS_Values[NUMBER_OF_POS_REGS] __attribute__((packed));
+} MC_SlotInformation_t;
+
+struct QuadDescription {
+	__u8  Type __attribute__((packed));	/* for type 0 (DYADIC or MONADIC)
+						 * all fields will be zero
+						 * except for slot */
+	__u8 StructureVersion __attribute__((packed));
+	__u32 CPI_BaseAddress __attribute__((packed));
+	__u32  LARC_BankSize __attribute__((packed));	
+	__u32 LocalMemoryStateBits __attribute__((packed));
+	__u8  Slot __attribute__((packed)); /* Processor slots 1 - 4 */
+}; 
+
+struct ProcBoardInfo { 
+	__u8 Type __attribute__((packed));    
+	__u8 StructureVersion __attribute__((packed));
+	__u8 NumberOfBoards __attribute__((packed));
+	struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS] __attribute__((packed));
+};
+
+struct CacheDescription {
+	__u8 Level __attribute__((packed));
+	__u32 TotalSize __attribute__((packed));
+	__u16 LineSize __attribute__((packed));
+	__u8  Associativity __attribute__((packed));
+	__u8  CacheType __attribute__((packed));
+	__u8  WriteType __attribute__((packed));
+	__u8  Number_CPUs_SharedBy __attribute__((packed));
+	__u8  Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS] __attribute__((packed));
+
+};
+
+struct CPU_Description {
+	__u8 CPU_HardwareId __attribute__((packed));
+	char *FRU_String __attribute__((packed));
+	__u8 NumberOfCacheLevels __attribute__((packed));
+	struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS] __attribute__((packed));
+};
+
+struct CPU_Info {
+	__u8 Type __attribute__((packed));
+	__u8 StructureVersion __attribute__((packed));
+	__u8 NumberOf_CPUs __attribute__((packed));
+	struct CPU_Description CPU_Data[MAX_CPUS] __attribute__((packed));
+};
+
+
+/*
+ * This structure will be used by SUS and the OS.
+ * The assumption about this structure is that no blank space is
+ * packed in it by our friend the compiler.
+ */
+typedef struct {
+	__u8	Mailbox_SUS;		/* Written to by SUS to give commands/response to the OS */
+	__u8	Mailbox_OS;		/* Written to by the OS to give commands/response to SUS */
+	__u8	SUS_MailboxVersion;	/* Tells the OS which iteration of the interface SUS supports */
+	__u8	OS_MailboxVersion;	/* Tells SUS which iteration of the interface the OS supports */
+	__u32	OS_Flags;		/* Flags set by the OS as info for SUS */
+	__u32	SUS_Flags;		/* Flags set by SUS as info for the OS */
+	__u32	WatchDogPeriod;		/* Watchdog period (in seconds) which the DP uses to see if the OS is dead */
+	__u32	WatchDogCount;		/* Updated by the OS on every tick. */
+	__u32	MemoryFor_SUS_ErrorLog;	/* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */
+	MC_SlotInformation_t  MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];	/* Storage for MCA POS data */
+	/* All new SECOND_PASS_INTERFACE fields added from this point */
+        struct ProcBoardInfo    *BoardData;
+        struct CPU_Info         *CPU_Data;
+	/* All new fields must be added from this point */
+} Voyager_KernelSUS_Mbox_t;
+
+/* structure for finding the right memory address to send a QIC CPI to */
+struct voyager_qic_cpi {
+	/* Each cache line (32 bytes) can trigger a cpi.  The cpi
+	 * read/write may occur anywhere in the cache line---pick the
+	 * middle to be safe */
+	struct  {
+		__u32 pad1[3];
+		__u32 cpi;
+		__u32 pad2[4];
+	} qic_cpi[8];
+};
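
Each qic_cpi[] entry above is exactly one 32-byte cache line: pad1[3] (12
bytes) places the cpi word at byte offset 12, roughly the middle of the
line, and pad2[4] (16 bytes) fills the remainder.  A compile-time check
could confirm the layout (a sketch, assuming BUILD_BUG_ON from
<linux/kernel.h> and offsetof from <linux/stddef.h>):

	static inline void voyager_qic_cpi_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct voyager_qic_cpi) != 8 * 32);
		BUILD_BUG_ON(offsetof(struct voyager_qic_cpi,
				      qic_cpi[0].cpi) != 12);
	}
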
+
+struct voyager_status {
+	__u32	power_fail:1;
+	__u32	switch_off:1;
+	__u32	request_from_kernel:1;
+};
+
+struct voyager_psi_regs {
+	__u8 cat_id;
+	__u8 cat_dev;
+	__u8 cat_control;
+	__u8 subaddr;
+	__u8 dummy4;
+	__u8 checkbit;
+	__u8 subaddr_low;
+	__u8 subaddr_high;
+	__u8 intstatus;
+	__u8 stat1;
+	__u8 stat3;
+	__u8 fault;
+	__u8 tms;
+	__u8 gen;
+	__u8 sysconf;
+	__u8 dummy15;
+};
+
+struct voyager_psi_subregs {
+	__u8 supply;
+	__u8 mask;
+	__u8 present;
+	__u8 DCfail;
+	__u8 ACfail;
+	__u8 fail;
+	__u8 UPSfail;
+	__u8 genstatus;
+};
+
+struct voyager_psi {
+	struct voyager_psi_regs regs;
+	struct voyager_psi_subregs subregs;
+};
+
+struct voyager_SUS {
+#define	VOYAGER_DUMP_BUTTON_NMI		0x1
+#define VOYAGER_SUS_VALID		0x2
+#define VOYAGER_SYSINT_COMPLETE		0x3
+	__u8	SUS_mbox;
+#define VOYAGER_NO_COMMAND		0x0
+#define VOYAGER_IGNORE_DUMP		0x1
+#define VOYAGER_DO_DUMP			0x2
+#define VOYAGER_SYSINT_HANDSHAKE	0x3
+#define VOYAGER_DO_MEM_DUMP		0x4
+#define VOYAGER_SYSINT_WAS_RECOVERED	0x5
+	__u8	kernel_mbox;
+#define	VOYAGER_MAILBOX_VERSION		0x10
+	__u8	SUS_version;
+	__u8	kernel_version;
+#define VOYAGER_OS_HAS_SYSINT		0x1
+#define VOYAGER_OS_IN_PROGRESS		0x2
+#define VOYAGER_UPDATING_WDPERIOD	0x4
+	__u32	kernel_flags;
+#define VOYAGER_SUS_BOOTING		0x1
+#define VOYAGER_SUS_IN_PROGRESS		0x2
+	__u32	SUS_flags;
+	__u32	watchdog_period;
+	__u32	watchdog_count;
+	__u32	SUS_errorlog;
+	/* lots of system configuration stuff under here */
+};
+
+/* Variables exported by voyager_smp */
+extern __u32 voyager_extended_vic_processors;
+extern __u32 voyager_allowed_boot_processors;
+extern __u32 voyager_quad_processors;
+extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
+extern struct voyager_SUS *voyager_SUS;
+
+/* variables exported always */
+extern int voyager_level;
+extern int kvoyagerd_running;
+extern struct semaphore kvoyagerd_sem;
+extern struct voyager_status voyager_status;
+
+
+
+/* functions exported by the voyager and voyager_smp modules */
+
+extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
+extern void voyager_cat_init(void);
+extern void voyager_detect(struct voyager_bios_info *);
+extern void voyager_trap_init(void);
+extern void voyager_setup_irqs(void);
+extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
+extern void voyager_smp_intr_init(void);
+extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
+extern void voyager_smp_dump(void);
+extern void voyager_timer_interrupt(struct pt_regs *regs);
+extern void smp_local_timer_interrupt(struct pt_regs * regs);
+extern void voyager_power_off(void);
+extern void smp_voyager_power_off(void *dummy);
+extern void voyager_restart(void);
+extern void voyager_cat_power_off(void);
+extern void voyager_cat_do_common_interrupt(void);
+extern void voyager_handle_nmi(void);
+/* Command codes for the voyager_cat_psi() call below */
+#define	VOYAGER_PSI_READ	0
+#define VOYAGER_PSI_WRITE	1
+#define VOYAGER_PSI_SUBREAD	2
+#define VOYAGER_PSI_SUBWRITE	3
+extern void voyager_cat_psi(__u8, __u16, __u8 *);
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h
new file mode 100644
index 0000000..f80e2db
--- /dev/null
+++ b/include/asm-i386/xor.h
@@ -0,0 +1,883 @@
+/*
+ * include/asm-i386/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for MMX and SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define LD(x,y)		"       movq   8*("#x")(%1), %%mm"#y"   ;\n"
+#define ST(x,y)		"       movq %%mm"#y",   8*("#x")(%1)   ;\n"
+#define XO1(x,y)	"       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
+#define XO2(x,y)	"       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
+#define XO3(x,y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
+#define XO4(x,y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
+
+#include <asm/i387.h>
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	unsigned long lines = bytes >> 7;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+	LD(i,0)					\
+		LD(i+1,1)			\
+			LD(i+2,2)		\
+				LD(i+3,3)	\
+	XO1(i,0)				\
+	ST(i,0)					\
+		XO1(i+1,1)			\
+		ST(i+1,1)			\
+			XO1(i+2,2)		\
+			ST(i+2,2)		\
+				XO1(i+3,3)	\
+				ST(i+3,3)
+
+	" .align 32			;\n"
+	" 1:                            ;\n"
+
+	BLOCK(0)
+	BLOCK(4)
+	BLOCK(8)
+	BLOCK(12)
+
+	"       addl $128, %1         ;\n"
+	"       addl $128, %2         ;\n"
+	"       decl %0               ;\n"
+	"       jnz 1b                ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2)
+	:
+	: "memory");
+
+	kernel_fpu_end();
+}
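
For reference, what xor_pII_mmx_2 computes, expressed in plain C (an
illustrative equivalent, not part of the patch; bytes must be a multiple of
the 128-byte block the MMX loop consumes per iteration, since lines is
bytes >> 7):

	static void xor_ref_2(unsigned long bytes,
			      unsigned long *p1, unsigned long *p2)
	{
		unsigned long i, words = bytes / sizeof(unsigned long);

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i];	/* destination ^= source */
	}
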
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	      unsigned long *p3)
+{
+	unsigned long lines = bytes >> 7;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+	LD(i,0)					\
+		LD(i+1,1)			\
+			LD(i+2,2)		\
+				LD(i+3,3)	\
+	XO1(i,0)				\
+		XO1(i+1,1)			\
+			XO1(i+2,2)		\
+				XO1(i+3,3)	\
+	XO2(i,0)				\
+	ST(i,0)					\
+		XO2(i+1,1)			\
+		ST(i+1,1)			\
+			XO2(i+2,2)		\
+			ST(i+2,2)		\
+				XO2(i+3,3)	\
+				ST(i+3,3)
+
+	" .align 32			;\n"
+	" 1:                            ;\n"
+
+	BLOCK(0)
+	BLOCK(4)
+	BLOCK(8)
+	BLOCK(12)
+
+	"       addl $128, %1         ;\n"
+	"       addl $128, %2         ;\n"
+	"       addl $128, %3         ;\n"
+	"       decl %0               ;\n"
+	"       jnz 1b                ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3)
+	:
+	: "memory");
+
+	kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	      unsigned long *p3, unsigned long *p4)
+{
+	unsigned long lines = bytes >> 7;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+	LD(i,0)					\
+		LD(i+1,1)			\
+			LD(i+2,2)		\
+				LD(i+3,3)	\
+	XO1(i,0)				\
+		XO1(i+1,1)			\
+			XO1(i+2,2)		\
+				XO1(i+3,3)	\
+	XO2(i,0)				\
+		XO2(i+1,1)			\
+			XO2(i+2,2)		\
+				XO2(i+3,3)	\
+	XO3(i,0)				\
+	ST(i,0)					\
+		XO3(i+1,1)			\
+		ST(i+1,1)			\
+			XO3(i+2,2)		\
+			ST(i+2,2)		\
+				XO3(i+3,3)	\
+				ST(i+3,3)
+
+	" .align 32			;\n"
+	" 1:                            ;\n"
+
+	BLOCK(0)
+	BLOCK(4)
+	BLOCK(8)
+	BLOCK(12)
+
+	"       addl $128, %1         ;\n"
+	"       addl $128, %2         ;\n"
+	"       addl $128, %3         ;\n"
+	"       addl $128, %4         ;\n"
+	"       decl %0               ;\n"
+	"       jnz 1b                ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+	:
+	: "memory");
+
+	kernel_fpu_end();
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	      unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	unsigned long lines = bytes >> 7;
+
+	kernel_fpu_begin();
+
+	/* Make sure GCC forgets anything it knows about p4 or p5,
+	   such that it won't pass to the asm volatile below a
+	   register that is shared with any other variable.  That's
+	   because we modify p4 and p5 there, but we can't mark them
+	   as read/write, otherwise we'd overflow the 10-asm-operands
+	   limit of GCC < 3.1.  */
+	__asm__ ("" : "+r" (p4), "+r" (p5));
+
+	__asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+	LD(i,0)					\
+		LD(i+1,1)			\
+			LD(i+2,2)		\
+				LD(i+3,3)	\
+	XO1(i,0)				\
+		XO1(i+1,1)			\
+			XO1(i+2,2)		\
+				XO1(i+3,3)	\
+	XO2(i,0)				\
+		XO2(i+1,1)			\
+			XO2(i+2,2)		\
+				XO2(i+3,3)	\
+	XO3(i,0)				\
+		XO3(i+1,1)			\
+			XO3(i+2,2)		\
+				XO3(i+3,3)	\
+	XO4(i,0)				\
+	ST(i,0)					\
+		XO4(i+1,1)			\
+		ST(i+1,1)			\
+			XO4(i+2,2)		\
+			ST(i+2,2)		\
+				XO4(i+3,3)	\
+				ST(i+3,3)
+
+	" .align 32			;\n"
+	" 1:                            ;\n"
+
+	BLOCK(0)
+	BLOCK(4)
+	BLOCK(8)
+	BLOCK(12)
+
+	"       addl $128, %1         ;\n"
+	"       addl $128, %2         ;\n"
+	"       addl $128, %3         ;\n"
+	"       addl $128, %4         ;\n"
+	"       addl $128, %5         ;\n"
+	"       decl %0               ;\n"
+	"       jnz 1b                ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3)
+	: "r" (p4), "r" (p5) 
+	: "memory");
+
+	/* p4 and p5 were modified, and now the variables are dead.
+	   Clobber them just to be sure nobody does something stupid
+	   like assuming they have some legal value.  */
+	__asm__ ("" : "=r" (p4), "=r" (p5));
+
+	kernel_fpu_end();
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	unsigned long lines = bytes >> 6;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+	" .align 32	             ;\n"
+	" 1:                         ;\n"
+	"       movq   (%1), %%mm0   ;\n"
+	"       movq  8(%1), %%mm1   ;\n"
+	"       pxor   (%2), %%mm0   ;\n"
+	"       movq 16(%1), %%mm2   ;\n"
+	"       movq %%mm0,   (%1)   ;\n"
+	"       pxor  8(%2), %%mm1   ;\n"
+	"       movq 24(%1), %%mm3   ;\n"
+	"       movq %%mm1,  8(%1)   ;\n"
+	"       pxor 16(%2), %%mm2   ;\n"
+	"       movq 32(%1), %%mm4   ;\n"
+	"       movq %%mm2, 16(%1)   ;\n"
+	"       pxor 24(%2), %%mm3   ;\n"
+	"       movq 40(%1), %%mm5   ;\n"
+	"       movq %%mm3, 24(%1)   ;\n"
+	"       pxor 32(%2), %%mm4   ;\n"
+	"       movq 48(%1), %%mm6   ;\n"
+	"       movq %%mm4, 32(%1)   ;\n"
+	"       pxor 40(%2), %%mm5   ;\n"
+	"       movq 56(%1), %%mm7   ;\n"
+	"       movq %%mm5, 40(%1)   ;\n"
+	"       pxor 48(%2), %%mm6   ;\n"
+	"       pxor 56(%2), %%mm7   ;\n"
+	"       movq %%mm6, 48(%1)   ;\n"
+	"       movq %%mm7, 56(%1)   ;\n"
+	
+	"       addl $64, %1         ;\n"
+	"       addl $64, %2         ;\n"
+	"       decl %0              ;\n"
+	"       jnz 1b               ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2)
+	:
+	: "memory");
+
+	kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	     unsigned long *p3)
+{
+	unsigned long lines = bytes >> 6;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+	" .align 32,0x90             ;\n"
+	" 1:                         ;\n"
+	"       movq   (%1), %%mm0   ;\n"
+	"       movq  8(%1), %%mm1   ;\n"
+	"       pxor   (%2), %%mm0   ;\n"
+	"       movq 16(%1), %%mm2   ;\n"
+	"       pxor  8(%2), %%mm1   ;\n"
+	"       pxor   (%3), %%mm0   ;\n"
+	"       pxor 16(%2), %%mm2   ;\n"
+	"       movq %%mm0,   (%1)   ;\n"
+	"       pxor  8(%3), %%mm1   ;\n"
+	"       pxor 16(%3), %%mm2   ;\n"
+	"       movq 24(%1), %%mm3   ;\n"
+	"       movq %%mm1,  8(%1)   ;\n"
+	"       movq 32(%1), %%mm4   ;\n"
+	"       movq 40(%1), %%mm5   ;\n"
+	"       pxor 24(%2), %%mm3   ;\n"
+	"       movq %%mm2, 16(%1)   ;\n"
+	"       pxor 32(%2), %%mm4   ;\n"
+	"       pxor 24(%3), %%mm3   ;\n"
+	"       pxor 40(%2), %%mm5   ;\n"
+	"       movq %%mm3, 24(%1)   ;\n"
+	"       pxor 32(%3), %%mm4   ;\n"
+	"       pxor 40(%3), %%mm5   ;\n"
+	"       movq 48(%1), %%mm6   ;\n"
+	"       movq %%mm4, 32(%1)   ;\n"
+	"       movq 56(%1), %%mm7   ;\n"
+	"       pxor 48(%2), %%mm6   ;\n"
+	"       movq %%mm5, 40(%1)   ;\n"
+	"       pxor 56(%2), %%mm7   ;\n"
+	"       pxor 48(%3), %%mm6   ;\n"
+	"       pxor 56(%3), %%mm7   ;\n"
+	"       movq %%mm6, 48(%1)   ;\n"
+	"       movq %%mm7, 56(%1)   ;\n"
+      
+	"       addl $64, %1         ;\n"
+	"       addl $64, %2         ;\n"
+	"       addl $64, %3         ;\n"
+	"       decl %0              ;\n"
+	"       jnz 1b               ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3)
+	:
+	: "memory" );
+
+	kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	     unsigned long *p3, unsigned long *p4)
+{
+	unsigned long lines = bytes >> 6;
+
+	kernel_fpu_begin();
+
+	__asm__ __volatile__ (
+	" .align 32,0x90             ;\n"
+	" 1:                         ;\n"
+	"       movq   (%1), %%mm0   ;\n"
+	"       movq  8(%1), %%mm1   ;\n"
+	"       pxor   (%2), %%mm0   ;\n"
+	"       movq 16(%1), %%mm2   ;\n"
+	"       pxor  8(%2), %%mm1   ;\n"
+	"       pxor   (%3), %%mm0   ;\n"
+	"       pxor 16(%2), %%mm2   ;\n"
+	"       pxor  8(%3), %%mm1   ;\n"
+	"       pxor   (%4), %%mm0   ;\n"
+	"       movq 24(%1), %%mm3   ;\n"
+	"       pxor 16(%3), %%mm2   ;\n"
+	"       pxor  8(%4), %%mm1   ;\n"
+	"       movq %%mm0,   (%1)   ;\n"
+	"       movq 32(%1), %%mm4   ;\n"
+	"       pxor 24(%2), %%mm3   ;\n"
+	"       pxor 16(%4), %%mm2   ;\n"
+	"       movq %%mm1,  8(%1)   ;\n"
+	"       movq 40(%1), %%mm5   ;\n"
+	"       pxor 32(%2), %%mm4   ;\n"
+	"       pxor 24(%3), %%mm3   ;\n"
+	"       movq %%mm2, 16(%1)   ;\n"
+	"       pxor 40(%2), %%mm5   ;\n"
+	"       pxor 32(%3), %%mm4   ;\n"
+	"       pxor 24(%4), %%mm3   ;\n"
+	"       movq %%mm3, 24(%1)   ;\n"
+	"       movq 56(%1), %%mm7   ;\n"
+	"       movq 48(%1), %%mm6   ;\n"
+	"       pxor 40(%3), %%mm5   ;\n"
+	"       pxor 32(%4), %%mm4   ;\n"
+	"       pxor 48(%2), %%mm6   ;\n"
+	"       movq %%mm4, 32(%1)   ;\n"
+	"       pxor 56(%2), %%mm7   ;\n"
+	"       pxor 40(%4), %%mm5   ;\n"
+	"       pxor 48(%3), %%mm6   ;\n"
+	"       pxor 56(%3), %%mm7   ;\n"
+	"       movq %%mm5, 40(%1)   ;\n"
+	"       pxor 48(%4), %%mm6   ;\n"
+	"       pxor 56(%4), %%mm7   ;\n"
+	"       movq %%mm6, 48(%1)   ;\n"
+	"       movq %%mm7, 56(%1)   ;\n"
+      
+	"       addl $64, %1         ;\n"
+	"       addl $64, %2         ;\n"
+	"       addl $64, %3         ;\n"
+	"       addl $64, %4         ;\n"
+	"       decl %0              ;\n"
+	"       jnz 1b               ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+	:
+	: "memory");
+
+	kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	unsigned long lines = bytes >> 6;
+
+	kernel_fpu_begin();
+
+	/* Make sure GCC forgets anything it knows about p4 or p5,
+	   such that it won't pass to the asm volatile below a
+	   register that is shared with any other variable.  That's
+	   because we modify p4 and p5 there, but we can't mark them
+	   as read/write, otherwise we'd overflow the 10-asm-operands
+	   limit of GCC < 3.1.  */
+	__asm__ ("" : "+r" (p4), "+r" (p5));
+
+	__asm__ __volatile__ (
+	" .align 32,0x90             ;\n"
+	" 1:                         ;\n"
+	"       movq   (%1), %%mm0   ;\n"
+	"       movq  8(%1), %%mm1   ;\n"
+	"       pxor   (%2), %%mm0   ;\n"
+	"       pxor  8(%2), %%mm1   ;\n"
+	"       movq 16(%1), %%mm2   ;\n"
+	"       pxor   (%3), %%mm0   ;\n"
+	"       pxor  8(%3), %%mm1   ;\n"
+	"       pxor 16(%2), %%mm2   ;\n"
+	"       pxor   (%4), %%mm0   ;\n"
+	"       pxor  8(%4), %%mm1   ;\n"
+	"       pxor 16(%3), %%mm2   ;\n"
+	"       movq 24(%1), %%mm3   ;\n"
+	"       pxor   (%5), %%mm0   ;\n"
+	"       pxor  8(%5), %%mm1   ;\n"
+	"       movq %%mm0,   (%1)   ;\n"
+	"       pxor 16(%4), %%mm2   ;\n"
+	"       pxor 24(%2), %%mm3   ;\n"
+	"       movq %%mm1,  8(%1)   ;\n"
+	"       pxor 16(%5), %%mm2   ;\n"
+	"       pxor 24(%3), %%mm3   ;\n"
+	"       movq 32(%1), %%mm4   ;\n"
+	"       movq %%mm2, 16(%1)   ;\n"
+	"       pxor 24(%4), %%mm3   ;\n"
+	"       pxor 32(%2), %%mm4   ;\n"
+	"       movq 40(%1), %%mm5   ;\n"
+	"       pxor 24(%5), %%mm3   ;\n"
+	"       pxor 32(%3), %%mm4   ;\n"
+	"       pxor 40(%2), %%mm5   ;\n"
+	"       movq %%mm3, 24(%1)   ;\n"
+	"       pxor 32(%4), %%mm4   ;\n"
+	"       pxor 40(%3), %%mm5   ;\n"
+	"       movq 48(%1), %%mm6   ;\n"
+	"       movq 56(%1), %%mm7   ;\n"
+	"       pxor 32(%5), %%mm4   ;\n"
+	"       pxor 40(%4), %%mm5   ;\n"
+	"       pxor 48(%2), %%mm6   ;\n"
+	"       pxor 56(%2), %%mm7   ;\n"
+	"       movq %%mm4, 32(%1)   ;\n"
+	"       pxor 48(%3), %%mm6   ;\n"
+	"       pxor 56(%3), %%mm7   ;\n"
+	"       pxor 40(%5), %%mm5   ;\n"
+	"       pxor 48(%4), %%mm6   ;\n"
+	"       pxor 56(%4), %%mm7   ;\n"
+	"       movq %%mm5, 40(%1)   ;\n"
+	"       pxor 48(%5), %%mm6   ;\n"
+	"       pxor 56(%5), %%mm7   ;\n"
+	"       movq %%mm6, 48(%1)   ;\n"
+	"       movq %%mm7, 56(%1)   ;\n"
+      
+	"       addl $64, %1         ;\n"
+	"       addl $64, %2         ;\n"
+	"       addl $64, %3         ;\n"
+	"       addl $64, %4         ;\n"
+	"       addl $64, %5         ;\n"
+	"       decl %0              ;\n"
+	"       jnz 1b               ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3)
+	: "r" (p4), "r" (p5)
+	: "memory");
+
+	/* p4 and p5 were modified, and now the variables are dead.
+	   Clobber them just to be sure nobody does something stupid
+	   like assuming they have some legal value.  */
+	__asm__ ("" : "=r" (p4), "=r" (p5));
+
+	kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+	.name = "pII_mmx",
+	.do_2 = xor_pII_mmx_2,
+	.do_3 = xor_pII_mmx_3,
+	.do_4 = xor_pII_mmx_4,
+	.do_5 = xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+	.name = "p5_mmx",
+	.do_2 = xor_p5_mmx_2,
+	.do_3 = xor_p5_mmx_3,
+	.do_4 = xor_p5_mmx_4,
+	.do_5 = xor_p5_mmx_5,
+};
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI (SSE) instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+#define XMMS_SAVE do {				\
+	preempt_disable();			\
+	__asm__ __volatile__ ( 			\
+		"movl %%cr0,%0		;\n\t"	\
+		"clts			;\n\t"	\
+		"movups %%xmm0,(%1)	;\n\t"	\
+		"movups %%xmm1,0x10(%1)	;\n\t"	\
+		"movups %%xmm2,0x20(%1)	;\n\t"	\
+		"movups %%xmm3,0x30(%1)	;\n\t"	\
+		: "=&r" (cr0)			\
+		: "r" (xmm_save) 		\
+		: "memory");			\
+} while(0)
+
+#define XMMS_RESTORE do {			\
+	__asm__ __volatile__ ( 			\
+		"sfence			;\n\t"	\
+		"movups (%1),%%xmm0	;\n\t"	\
+		"movups 0x10(%1),%%xmm1	;\n\t"	\
+		"movups 0x20(%1),%%xmm2	;\n\t"	\
+		"movups 0x30(%1),%%xmm3	;\n\t"	\
+		"movl 	%0,%%cr0	;\n\t"	\
+		:				\
+		: "r" (cr0), "r" (xmm_save)	\
+		: "memory");			\
+	preempt_enable();			\
+} while(0)
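
Taken together, the two macros bracket each SSE routine below: XMMS_SAVE
disables preemption, saves %cr0, executes clts so a set CR0.TS bit cannot
fault the SSE instructions, and stores the four XMM registers about to be
used into xmm_save[]; XMMS_RESTORE issues an sfence to drain the stores,
reloads the registers and %cr0 (restoring TS if it was set), and re-enables
preemption.  Unlike the MMX paths above, no kernel_fpu_begin() is required,
because the caller's FPU state is preserved by hand.
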
+
+#define ALIGN16 __attribute__((aligned(16)))
+
+#define OFFS(x)		"16*("#x")"
+#define PF_OFFS(x)	"256+16*("#x")"
+#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%1)		;\n"
+#define LD(x,y)		"       movaps   "OFFS(x)"(%1), %%xmm"#y"	;\n"
+#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%1)	;\n"
+#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%2)		;\n"
+#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%3)		;\n"
+#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%4)		;\n"
+#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%5)		;\n"
+#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%6)		;\n"
+#define XO1(x,y)	"       xorps   "OFFS(x)"(%2), %%xmm"#y"	;\n"
+#define XO2(x,y)	"       xorps   "OFFS(x)"(%3), %%xmm"#y"	;\n"
+#define XO3(x,y)	"       xorps   "OFFS(x)"(%4), %%xmm"#y"	;\n"
+#define XO4(x,y)	"       xorps   "OFFS(x)"(%5), %%xmm"#y"	;\n"
+#define XO5(x,y)	"       xorps   "OFFS(x)"(%6), %%xmm"#y"	;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+        unsigned long lines = bytes >> 8;
+	char xmm_save[16*4] ALIGN16;
+	int cr0;
+
+	XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+		LD(i,0)					\
+			LD(i+1,1)			\
+		PF1(i)					\
+				PF1(i+2)		\
+				LD(i+2,2)		\
+					LD(i+3,3)	\
+		PF0(i+4)				\
+				PF0(i+6)		\
+		XO1(i,0)				\
+			XO1(i+1,1)			\
+				XO1(i+2,2)		\
+					XO1(i+3,3)	\
+		ST(i,0)					\
+			ST(i+1,1)			\
+				ST(i+2,2)		\
+					ST(i+3,3)	\
+
+
+		PF0(0)
+				PF0(2)
+
+	" .align 32			;\n"
+        " 1:                            ;\n"
+
+		BLOCK(0)
+		BLOCK(4)
+		BLOCK(8)
+		BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2)
+	:
+        : "memory");
+
+	XMMS_RESTORE;
+}
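
Note the shape of the SSE loop just above: lines = bytes >> 8, so each
iteration handles 256 bytes (four BLOCKs of four 16-byte XMM loads), and
the PF0/PF1 prefetchnta hints (PF_OFFS adds 256) fetch each operand one
full iteration ahead as non-temporal data, which is the cache-avoiding
behaviour the comment at the top of this section refers to.
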
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	  unsigned long *p3)
+{
+        unsigned long lines = bytes >> 8;
+	char xmm_save[16*4] ALIGN16;
+	int cr0;
+
+	XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+		PF1(i)					\
+				PF1(i+2)		\
+		LD(i,0)					\
+			LD(i+1,1)			\
+				LD(i+2,2)		\
+					LD(i+3,3)	\
+		PF2(i)					\
+				PF2(i+2)		\
+		PF0(i+4)				\
+				PF0(i+6)		\
+		XO1(i,0)				\
+			XO1(i+1,1)			\
+				XO1(i+2,2)		\
+					XO1(i+3,3)	\
+		XO2(i,0)				\
+			XO2(i+1,1)			\
+				XO2(i+2,2)		\
+					XO2(i+3,3)	\
+		ST(i,0)					\
+			ST(i+1,1)			\
+				ST(i+2,2)		\
+					ST(i+3,3)	\
+
+
+		PF0(0)
+				PF0(2)
+
+	" .align 32			;\n"
+        " 1:                            ;\n"
+
+		BLOCK(0)
+		BLOCK(4)
+		BLOCK(8)
+		BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r"(p2), "+r"(p3)
+	:
+        : "memory" );
+
+	XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	  unsigned long *p3, unsigned long *p4)
+{
+        unsigned long lines = bytes >> 8;
+	char xmm_save[16*4] ALIGN16;
+	int cr0;
+
+	XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+		PF1(i)					\
+				PF1(i+2)		\
+		LD(i,0)					\
+			LD(i+1,1)			\
+				LD(i+2,2)		\
+					LD(i+3,3)	\
+		PF2(i)					\
+				PF2(i+2)		\
+		XO1(i,0)				\
+			XO1(i+1,1)			\
+				XO1(i+2,2)		\
+					XO1(i+3,3)	\
+		PF3(i)					\
+				PF3(i+2)		\
+		PF0(i+4)				\
+				PF0(i+6)		\
+		XO2(i,0)				\
+			XO2(i+1,1)			\
+				XO2(i+2,2)		\
+					XO2(i+3,3)	\
+		XO3(i,0)				\
+			XO3(i+1,1)			\
+				XO3(i+2,2)		\
+					XO3(i+3,3)	\
+		ST(i,0)					\
+			ST(i+1,1)			\
+				ST(i+2,2)		\
+					ST(i+3,3)	\
+
+
+		PF0(0)
+				PF0(2)
+
+	" .align 32			;\n"
+        " 1:                            ;\n"
+
+		BLOCK(0)
+		BLOCK(4)
+		BLOCK(8)
+		BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+	:
+        : "memory" );
+
+	XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+        unsigned long lines = bytes >> 8;
+	char xmm_save[16*4] ALIGN16;
+	int cr0;
+
+	XMMS_SAVE;
+
+	/* Make sure GCC forgets anything it knows about p4 or p5,
+	   such that it won't pass to the asm volatile below a
+	   register that is shared with any other variable.  That's
+	   because we modify p4 and p5 there, but we can't mark them
+	   as read/write, otherwise we'd overflow the 10-asm-operands
+	   limit of GCC < 3.1.  */
+	__asm__ ("" : "+r" (p4), "+r" (p5));
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+		PF1(i)					\
+				PF1(i+2)		\
+		LD(i,0)					\
+			LD(i+1,1)			\
+				LD(i+2,2)		\
+					LD(i+3,3)	\
+		PF2(i)					\
+				PF2(i+2)		\
+		XO1(i,0)				\
+			XO1(i+1,1)			\
+				XO1(i+2,2)		\
+					XO1(i+3,3)	\
+		PF3(i)					\
+				PF3(i+2)		\
+		XO2(i,0)				\
+			XO2(i+1,1)			\
+				XO2(i+2,2)		\
+					XO2(i+3,3)	\
+		PF4(i)					\
+				PF4(i+2)		\
+		PF0(i+4)				\
+				PF0(i+6)		\
+		XO3(i,0)				\
+			XO3(i+1,1)			\
+				XO3(i+2,2)		\
+					XO3(i+3,3)	\
+		XO4(i,0)				\
+			XO4(i+1,1)			\
+				XO4(i+2,2)		\
+					XO4(i+3,3)	\
+		ST(i,0)					\
+			ST(i+1,1)			\
+				ST(i+2,2)		\
+					ST(i+3,3)	\
+
+
+		PF0(0)
+				PF0(2)
+
+	" .align 32			;\n"
+        " 1:                            ;\n"
+
+		BLOCK(0)
+		BLOCK(4)
+		BLOCK(8)
+		BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       addl $256, %5           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+	: "+r" (lines),
+	  "+r" (p1), "+r" (p2), "+r" (p3)
+	: "r" (p4), "r" (p5)
+	: "memory");
+
+	/* p4 and p5 were modified, and now the variables are dead.
+	   Clobber them just to be sure nobody does something stupid
+	   like assuming they have some legal value.  */
+	__asm__ ("" : "=r" (p4), "=r" (p5));
+
+	XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_pIII_sse = {
+        .name = "pIII_sse",
+        .do_2 =  xor_sse_2,
+        .do_3 =  xor_sse_3,
+        .do_4 =  xor_sse_4,
+        .do_5 = xor_sse_5,
+};
+
+/* Also try the generic routines.  */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES				\
+	do {						\
+		xor_speed(&xor_block_8regs);		\
+		xor_speed(&xor_block_8regs_p);		\
+		xor_speed(&xor_block_32regs);		\
+		xor_speed(&xor_block_32regs_p);		\
+	        if (cpu_has_xmm)			\
+			xor_speed(&xor_block_pIII_sse);	\
+	        if (cpu_has_mmx) {			\
+	                xor_speed(&xor_block_pII_mmx);	\
+	                xor_speed(&xor_block_p5_mmx);	\
+	        }					\
+	} while (0)
+
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into just the L1, depending on how the cpu
+   deals with a load to a line that is being prefetched.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
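
Taken together (an illustrative sketch of the generic caller, which lives
outside this header): the RAID code benchmarks every template offered by
XOR_TRY_TEMPLATES, remembers the fastest, then gives the architecture the
final word via XOR_SELECT_TEMPLATE, which here always prefers the SSE
template whenever XMM is available:

	/* hypothetical outline of the generic calibration logic */
	struct xor_block_template *fastest = NULL;

	XOR_TRY_TEMPLATES;			/* xor_speed() times each one */
	fastest = XOR_SELECT_TEMPLATE(fastest);	/* arch override, if any */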