Merge by hand from Linus' tree.

Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
new file mode 100644
index 0000000..edfac46
--- /dev/null
+++ b/arch/powerpc/Kconfig
@@ -0,0 +1,861 @@
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+mainmenu "Linux/PowerPC Kernel Configuration"
+
+config PPC64
+	bool "64-bit kernel"
+	default n
+	help
+	  This option selects whether a 32-bit or a 64-bit kernel
+	  will be built.
+
+config PPC32
+	bool
+	default y if !PPC64
+
+config 64BIT
+	bool
+	default y if PPC64
+
+config PPC_MERGE
+	def_bool y
+
+config MMU
+	bool
+	default y
+
+config UID16
+	bool
+
+config GENERIC_HARDIRQS
+	bool
+	default y
+
+config RWSEM_GENERIC_SPINLOCK
+	bool
+
+config RWSEM_XCHGADD_ALGORITHM
+	bool
+	default y
+
+config GENERIC_CALIBRATE_DELAY
+	bool
+	default y
+
+config PPC
+	bool
+	default y
+
+config EARLY_PRINTK
+	bool
+	default y if PPC64
+
+config COMPAT
+	bool
+	default y if PPC64
+
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
+# All PPC32 systems use the generic nvram driver through ppc_md
+config GENERIC_NVRAM
+	bool
+	default y if PPC32
+
+config SCHED_NO_NO_OMIT_FRAME_POINTER
+	bool
+	default y
+
+config ARCH_MAY_HAVE_PC_FDC
+	bool
+	default y
+
+menu "Processor support"
+choice
+	prompt "Processor Type"
+	depends on PPC32
+	default 6xx
+
+config 6xx
+	bool "6xx/7xx/74xx"
+	select PPC_FPU
+	help
+	  There are four families of PowerPC chips supported.  The more common
+	  types (601, 603, 604, 740, 750, 7400), the Motorola embedded
+	  versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
+	  embedded versions (403 and 405) and the high end 64-bit Power
+	  processors (POWER3, POWER4, and IBM PPC970, also known as G5).
+
+	  Unless you are building a kernel for one of the embedded processor
+	  systems, a 64-bit IBM RS/6000, or an Apple G5, choose 6xx.
+	  Note that the kernel runs in 32-bit mode even on 64-bit chips.
+
+config PPC_52xx
+	bool "Freescale 52xx"
+
+config PPC_82xx
+	bool "Freescale 82xx"
+
+config PPC_83xx
+	bool "Freescale 83xx"
+
+config 40x
+	bool "AMCC 40x"
+
+config 44x
+	bool "AMCC 44x"
+
+config PPC64BRIDGE
+	select PPC_FPU
+	bool "POWER3, POWER4 and PPC970 (G5)"
+
+config 8xx
+	bool "Freescale 8xx"
+
+config E200
+	bool "Freescale e200"
+
+config E500
+	bool "Freescale e500"
+endchoice
+
+config POWER4_ONLY
+	bool "Optimize for POWER4"
+	depends on PPC64 || PPC64BRIDGE
+	default n
+	---help---
+	  Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
+	  The resulting binary will not work on POWER3 or RS64 processors
+	  when compiled with binutils 2.15 or later.
+
+config POWER3
+	bool
+	depends on PPC64 || PPC64BRIDGE
+	default y if !POWER4_ONLY
+
+config POWER4
+	depends on PPC64 || PPC64BRIDGE
+	def_bool y
+
+config PPC_FPU
+	bool
+	default y if PPC64
+
+config BOOKE
+	bool
+	depends on E200 || E500
+	default y
+
+config FSL_BOOKE
+	bool
+	depends on E200 || E500
+	default y
+
+config PTE_64BIT
+	bool
+	depends on 44x || E500
+	default y if 44x
+	default y if E500 && PHYS_64BIT
+
+config PHYS_64BIT
+	bool 'Large physical address support' if E500
+	depends on 44x || E500
+	default y if 44x
+	---help---
+	  This option enables kernel support for larger than 32-bit physical
+	  addresses.  This feature is not available on all e500 cores.
+
+	  If in doubt, say N here.
+
+config ALTIVEC
+	bool "AltiVec Support"
+	depends on 6xx || POWER4
+	---help---
+	  This option enables kernel support for the Altivec extensions to the
+	  PowerPC processor. The kernel currently supports saving and restoring
+	  altivec registers, and turning on the 'altivec enable' bit so user
+	  processes can execute altivec instructions.
+
+	  This option is only useful if you have a processor that supports
+	  altivec (G4, otherwise known as 74xx series), but does not have
+	  any effect on a non-altivec cpu (it does, however, add code to the
+	  kernel).
+
+	  If in doubt, say Y here.
+
+config SPE
+	bool "SPE Support"
+	depends on E200 || E500
+	---help---
+	  This option enables kernel support for the Signal Processing
+	  Extensions (SPE) to the PowerPC processor. The kernel currently
+	  supports saving and restoring SPE registers, and turning on the
+	  'spe enable' bit so user processes can execute SPE instructions.
+
+	  This option is only useful if you have a processor that supports
+	  SPE (e500, otherwise known as 85xx series), but does not have any
+	  effect on a non-spe cpu (it does, however, add code to the kernel).
+
+	  If in doubt, say Y here.
+
+config PPC_STD_MMU
+	bool
+	depends on 6xx || POWER3 || POWER4 || PPC64
+	default y
+
+config PPC_STD_MMU_32
+	def_bool y
+	depends on PPC_STD_MMU && PPC32
+
+config SMP
+	depends on PPC_STD_MMU
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more
+	  than one CPU, say Y.  Note that the kernel does not currently
+	  support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
+	  since they have inadequate hardware support for multiprocessor
+	  operation.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on single-processor machines.
+	  On a single-processor machine, the kernel will run faster if you say
+	  N here.
+
+	  If you don't know what to do here, say N.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-32)"
+	range 2 128
+	depends on SMP
+	default "32" if PPC64
+	default "4"
+
+config NOT_COHERENT_CACHE
+	bool
+	depends on 4xx || 8xx || E200
+	default y
+endmenu
+
+source "init/Kconfig"
+
+menu "Platform support"
+	depends on PPC64 || 6xx
+
+choice
+	prompt "Machine type"
+	default PPC_MULTIPLATFORM
+
+config PPC_MULTIPLATFORM
+	bool "Generic desktop/server/laptop"
+	help
+	  Select this option if configuring for an IBM pSeries or
+	  RS/6000 machine, an Apple machine, or a PReP, CHRP,
+	  Maple or Cell-based machine.
+
+config PPC_ISERIES
+	bool "IBM Legacy iSeries"
+	depends on PPC64
+
+config EMBEDDED6xx
+	bool "Embedded 6xx/7xx/7xxx-based board"
+	depends on PPC32
+
+config APUS
+	bool "Amiga-APUS"
+	depends on PPC32 && BROKEN
+	help
+	  Select APUS if configuring for a PowerUP Amiga.
+	  More information is available at:
+	  <http://linux-apus.sourceforge.net/>.
+endchoice
+
+config PPC_PSERIES
+	depends on PPC_MULTIPLATFORM && PPC64
+	bool "  IBM pSeries & new (POWER5-based) iSeries"
+	default y
+
+config PPC_CHRP
+	bool "  Common Hardware Reference Platform (CHRP) based machines"
+	depends on PPC_MULTIPLATFORM && PPC32
+	default y
+
+config PPC_PMAC
+	bool "  Apple PowerMac based machines"
+	depends on PPC_MULTIPLATFORM
+	default y
+
+config PPC_PMAC64
+	bool
+	depends on PPC_PMAC && POWER4
+	default y
+
+config PPC_PREP
+	bool "  PowerPC Reference Platform (PReP) based machines"
+	depends on PPC_MULTIPLATFORM && PPC32
+	default y
+
+config PPC_MAPLE
+	depends on PPC_MULTIPLATFORM && PPC64
+	bool "  Maple 970FX Evaluation Board"
+	select U3_DART
+	select MPIC_BROKEN_U3
+	default n
+	help
+	  This option enables support for the Maple 970FX Evaluation Board.
+	  For more information, refer to <http://www.970eval.com>.
+
+config PPC_BPA
+	bool "  Broadband Processor Architecture"
+	depends on PPC_MULTIPLATFORM && PPC64
+
+config PPC_OF
+	bool
+	depends on PPC_MULTIPLATFORM	# for now
+	default y
+
+config XICS
+	depends on PPC_PSERIES
+	bool
+	default y
+
+config U3_DART
+	bool
+	depends on PPC_MULTIPLATFORM && PPC64
+	default n
+
+config MPIC
+	depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
+	bool
+	default y
+
+config MPIC_BROKEN_U3
+	bool
+	depends on PPC_MAPLE
+	default y
+
+config BPA_IIC
+	depends on PPC_BPA
+	bool
+	default y
+
+config IBMVIO
+	depends on PPC_PSERIES || PPC_ISERIES
+	bool
+	default y
+
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_PMAC
+	bool "Support for Apple PowerBooks"
+	depends on CPU_FREQ && ADB_PMU && PPC32
+	select CPU_FREQ_TABLE
+	help
+	  This adds support for frequency switching on Apple PowerBooks;
+	  this currently includes some models of iBook & Titanium
+	  PowerBook.
+
+config PPC601_SYNC_FIX
+	bool "Workarounds for PPC601 bugs"
+	depends on 6xx && (PPC_PREP || PPC_PMAC)
+	help
+	  Some versions of the PPC601 (the first PowerPC chip) have bugs which
+	  mean that extra synchronization instructions are required near
+	  certain instructions, typically those that make major changes to the
+	  CPU state.  These extra instructions reduce performance slightly.
+	  If you say N here, these extra instructions will not be included,
+	  resulting in a kernel which will run faster but may not run at all
+	  on some systems with the PPC601 chip.
+
+	  If in doubt, say Y here.
+
+config TAU
+	bool "Thermal Management Support"
+	depends on 6xx
+	help
+	  G3 and G4 processors have an on-chip temperature sensor called the
+	  'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
+	  temperature within 2-4 degrees Celsius. This option shows the current
+	  on-die temperature in /proc/cpuinfo if the cpu supports it.
+
+	  Unfortunately, on some chip revisions, this sensor is very inaccurate
+	  and in some cases, does not work at all, so don't assume the cpu
+	  temp is actually what /proc/cpuinfo says it is.
+
+config TAU_INT
+	bool "Interrupt driven TAU driver (DANGEROUS)"
+	depends on TAU
+	---help---
+	  The TAU supports an interrupt driven mode which causes an interrupt
+	  whenever the temperature goes out of range. This is the fastest way
+	  to get notified that the temperature has exceeded a range. With this
+	  option off, a timer is used to re-check the temperature periodically.
+
+	  However, on some cpus it appears that the TAU interrupt hardware
+	  is buggy and can cause a situation which would lead to unexplained
+	  hard lockups.
+
+	  Unless you are extending the TAU driver, or enjoy kernel/hardware
+	  debugging, leave this option off.
+
+config TAU_AVERAGE
+	bool "Average high and low temp"
+	depends on TAU
+	---help---
+	  The TAU hardware can compare the temperature to an upper and lower
+	  bound.  The default behavior is to show both the upper and lower
+	  bound in /proc/cpuinfo. If the range is large, the temperature is
+	  either changing a lot, or the TAU hardware is broken (likely on some
+	  G4's). If the range is small (around 4 degrees), the temperature is
+	  relatively stable.  If you say Y here, a single temperature value,
+	  halfway between the upper and lower bounds, will be reported in
+	  /proc/cpuinfo.
+
+	  If in doubt, say N here.
+endmenu
+
+source arch/powerpc/platforms/embedded6xx/Kconfig
+source arch/powerpc/platforms/4xx/Kconfig
+source arch/powerpc/platforms/85xx/Kconfig
+source arch/powerpc/platforms/8xx/Kconfig
+
+menu "Kernel options"
+
+config HIGHMEM
+	bool "High memory support"
+	depends on PPC32
+
+source kernel/Kconfig.hz
+source kernel/Kconfig.preempt
+source "fs/Kconfig.binfmt"
+
+# We optimistically allocate largepages from the VM, so make the limit
+# large enough (16MB). This badly named config option is actually
+# max order + 1
+config FORCE_MAX_ZONEORDER
+	int
+	depends on PPC64
+	default "13"
+
+config MATH_EMULATION
+	bool "Math emulation"
+	depends on 4xx || 8xx || E200 || E500
+	---help---
+	  Some PowerPC chips designed for embedded applications do not have
+	  a floating-point unit and therefore do not implement the
+	  floating-point instructions in the PowerPC instruction set.  If you
+	  say Y here, the kernel will include code to emulate a floating-point
+	  unit, which will allow programs that use floating-point
+	  instructions to run.
+
+config IOMMU_VMERGE
+	bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && PPC64
+	default n
+	help
+	  Cause IO segments sent to a device for DMA to be merged virtually
+	  by the IOMMU when they happen to have been allocated contiguously.
+	  This doesn't add pressure to the IOMMU allocator. However, some
+	  drivers don't support getting large merged segments coming back
+	  from *_map_sg(). Say Y if you know the drivers you are using are
+	  properly handling this case.
+
+config HOTPLUG_CPU
+	bool "Support for enabling/disabling CPUs"
+	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
+	---help---
+	  Say Y here to be able to disable and re-enable individual
+	  CPUs at runtime on SMP machines.
+
+	  Say N if you are unsure.
+
+config KEXEC
+	bool "kexec system call (EXPERIMENTAL)"
+	depends on PPC_MULTIPLATFORM && EXPERIMENTAL
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shut down, so do not be surprised if this code does not
+	  initially work for you.  It may help to enable device hotplugging
+	  support.  As of this writing the exact hardware interface is
+	  strongly in flux, so no good recommendation can be made.
+
+config EMBEDDEDBOOT
+	bool
+	depends on 8xx || 8260
+	default y
+
+config PC_KEYBOARD
+	bool "PC PS/2 style Keyboard"
+	depends on 4xx || CPM2
+
+config PPCBUG_NVRAM
+	bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
+	default y if PPC_PREP
+
+config IRQ_ALL_CPUS
+	bool "Distribute interrupts on all CPUs by default"
+	depends on SMP && !MV64360
+	help
+	  This option gives the kernel permission to distribute IRQs across
+	  multiple CPUs.  Saying N here will route all IRQs to the first
+	  CPU.  Generally saying Y is safe, although some problems have been
+	  reported with SMP Power Macintoshes with this option enabled.
+
+source "arch/powerpc/platforms/pseries/Kconfig"
+
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+	depends on PPC64
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+	depends on PPC64 && !NUMA
+
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+	depends on SMP && PPC_PSERIES
+
+config ARCH_DISCONTIGMEM_DEFAULT
+	def_bool y
+	depends on ARCH_DISCONTIGMEM_ENABLE
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	depends on ARCH_DISCONTIGMEM_ENABLE
+
+source "mm/Kconfig"
+
+config HAVE_ARCH_EARLY_PFN_TO_NID
+	def_bool y
+	depends on NEED_MULTIPLE_NODES
+
+# Some NUMA nodes have memory ranges that span
+# other nodes.  Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.
+#
+# This is a relatively temporary hack that should
+# be able to go away when sparsemem is fully in
+# place
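+# (For example, node 0 might own pfns 0x0-0xfff and 0x3000-0x3fff while
+# node 1 owns 0x1000-0x2fff; a pfn lying between node 0's start and end
+# pfns can therefore still belong to node 1.)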
+
+config NODES_SPAN_OTHER_NODES
+	def_bool y
+	depends on NEED_MULTIPLE_NODES
+
+config NUMA
+	bool "NUMA support"
+	default y if DISCONTIGMEM || SPARSEMEM
+
+config SCHED_SMT
+	bool "SMT (Hyperthreading) scheduler support"
+	depends on PPC64 && SMP
+	default n
+	help
+	  SMT scheduler support improves the CPU scheduler's decision making
+	  when dealing with POWER5 cpus at a cost of slightly increased
+	  overhead in some places. If unsure, say N here.
+
+config PROC_DEVICETREE
+	bool "Support for Open Firmware device tree in /proc"
+	depends on PPC_OF && PROC_FS
+	help
+	  This option adds a device-tree directory under /proc which contains
+	  an image of the device tree that the kernel copies from Open
+	  Firmware. If unsure, say Y here.
+
+source "arch/powerpc/platforms/prep/Kconfig"
+
+config CMDLINE_BOOL
+	bool "Default bootloader kernel arguments"
+	depends on !PPC_ISERIES
+
+config CMDLINE
+	string "Initial kernel command string"
+	depends on CMDLINE_BOOL
+	default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
+	help
+	  On some platforms, there is currently no way for the boot loader to
+	  pass arguments to the kernel. For these platforms, you can supply
+	  some command-line options at build time by entering them here.  In
+	  most cases you will need to specify the root device here.
+
+if !44x || BROKEN
+source kernel/power/Kconfig
+endif
+
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	default y
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y. Only embedded should say N here.
+
+endmenu
+
+config ISA_DMA_API
+	bool
+	default y
+
+menu "Bus options"
+
+config ISA
+	bool "Support for ISA-bus hardware"
+	depends on PPC_PREP || PPC_CHRP
+	help
+	  Find out whether you have ISA slots on your motherboard.  ISA is the
+	  name of a bus system, i.e. the way the CPU talks to the other stuff
+	  inside your box.  If you have an Apple machine, say N here; if you
+	  have an IBM RS/6000 or pSeries machine or a PReP machine, say Y.  If
+	  you have an embedded board, consult your board documentation.
+
+config GENERIC_ISA_DMA
+	bool
+	depends on PPC64 || POWER4 || 6xx && !CPM2
+	default y
+
+config EISA
+	bool
+
+config SBUS
+	bool
+
+# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
+config MCA
+	bool
+
+config PCI
+	bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
+	default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
+	default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
+	default PCI_QSPAN if !4xx && !CPM2 && 8xx
+	help
+	  Find out whether your system includes a PCI bus. PCI is the name of
+	  a bus system, i.e. the way the CPU talks to the other stuff inside
+	  your box.  If you say Y here, the kernel will include drivers and
+	  infrastructure code to support PCI bus devices.
+
+config PCI_DOMAINS
+	bool
+	default PCI
+
+config MPC83xx_PCI2
+	bool "  Supprt for 2nd PCI host controller"
+	depends on PCI && MPC834x
+	default y if MPC834x_SYS
+
+config PCI_QSPAN
+	bool "QSpan PCI"
+	depends on !4xx && !CPM2 && 8xx
+	help
+	  Say Y here if you have a system based on a Motorola 8xx-series
+	  embedded processor with a QSPAN PCI interface, otherwise say N.
+
+config PCI_8260
+	bool
+	depends on PCI && 8260
+	default y
+
+config 8260_PCI9
+	bool "  Enable workaround for MPC826x erratum PCI 9"
+	depends on PCI_8260 && !ADS8272
+	default y
+
+choice
+	prompt "  IDMA channel for PCI 9 workaround"
+	depends on 8260_PCI9
+
+config 8260_PCI9_IDMA1
+	bool "IDMA1"
+
+config 8260_PCI9_IDMA2
+	bool "IDMA2"
+
+config 8260_PCI9_IDMA3
+	bool "IDMA3"
+
+config 8260_PCI9_IDMA4
+	bool "IDMA4"
+
+endchoice
+
+source "drivers/pci/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/hotplug/Kconfig"
+
+endmenu
+
+menu "Advanced setup"
+	depends on PPC32
+
+config ADVANCED_OPTIONS
+	bool "Prompt for advanced kernel configuration options"
+	help
+	  This option will enable prompting for a variety of advanced kernel
+	  configuration options.  These options can cause the kernel to not
+	  work if they are set incorrectly, but can be used to optimize certain
+	  aspects of kernel memory management.
+
+	  Unless you know what you are doing, say N here.
+
+comment "Default settings for advanced configuration options are used"
+	depends on !ADVANCED_OPTIONS
+
+config HIGHMEM_START_BOOL
+	bool "Set high memory pool address"
+	depends on ADVANCED_OPTIONS && HIGHMEM
+	help
+	  This option allows you to set the base address of the kernel virtual
+	  area used to map high memory pages.  This can be useful in
+	  optimizing the layout of kernel virtual memory.
+
+	  Say N here unless you know what you are doing.
+
+config HIGHMEM_START
+	hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
+	default "0xfe000000"
+
+config LOWMEM_SIZE_BOOL
+	bool "Set maximum low memory"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the maximum amount of memory which
+	  will be used as "low memory", that is, memory which the kernel can
+	  access directly, without having to set up a kernel virtual mapping.
+	  This can be useful in optimizing the layout of kernel virtual
+	  memory.
+
+	  Say N here unless you know what you are doing.
+
+config LOWMEM_SIZE
+	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
+	default "0x30000000"
+
+config KERNEL_START_BOOL
+	bool "Set custom kernel base address"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the kernel virtual address at which
+	  the kernel will map low memory (the kernel image will be linked at
+	  this address).  This can be useful in optimizing the virtual memory
+	  layout of the system.
+
+	  Say N here unless you know what you are doing.
+
+config KERNEL_START
+	hex "Virtual address of kernel base" if KERNEL_START_BOOL
+	default "0xc0000000"
+
+config TASK_SIZE_BOOL
+	bool "Set custom user task size"
+	depends on ADVANCED_OPTIONS
+	help
+	  This option allows you to set the amount of virtual address space
+	  allocated to user tasks.  This can be useful in optimizing the
+	  virtual memory layout of the system.
+
+	  Say N here unless you know what you are doing.
+
+config TASK_SIZE
+	hex "Size of user task space" if TASK_SIZE_BOOL
+	default "0x80000000"
+
+config CONSISTENT_START_BOOL
+	bool "Set custom consistent memory pool address"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the base virtual address
+	  of the consistent memory pool.  This pool of virtual
+	  memory is used to make consistent memory allocations.
+
+config CONSISTENT_START
+	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
+	default "0xff100000" if NOT_COHERENT_CACHE
+
+config CONSISTENT_SIZE_BOOL
+	bool "Set custom consistent memory pool size"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the size of the
+	  consistent memory pool.  This pool of virtual memory
+	  is used to make consistent memory allocations.
+
+config CONSISTENT_SIZE
+	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
+	default "0x00200000" if NOT_COHERENT_CACHE
+
+config BOOT_LOAD_BOOL
+	bool "Set the boot link/load address"
+	depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
+	help
+	  This option allows you to set the initial load address of the zImage
+	  or zImage.initrd file.  This can be useful if you are on a board
+	  which has a small amount of memory.
+
+	  Say N here unless you know what you are doing.
+
+config BOOT_LOAD
+	hex "Link/load address for booting" if BOOT_LOAD_BOOL
+	default "0x00400000" if 40x || 8xx || 8260
+	default "0x01000000" if 44x
+	default "0x00800000"
+
+config PIN_TLB
+	bool "Pinned Kernel TLBs (860 ONLY)"
+	depends on ADVANCED_OPTIONS && 8xx
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+# XXX source "arch/ppc/8xx_io/Kconfig"
+
+# XXX source "arch/ppc/8260_io/Kconfig"
+
+source "arch/powerpc/platforms/iseries/Kconfig"
+
+source "lib/Kconfig"
+
+source "arch/powerpc/oprofile/Kconfig"
+
+source "arch/powerpc/Kconfig.debug"
+
+source "security/Kconfig"
+
+config KEYS_COMPAT
+	bool
+	depends on COMPAT && KEYS
+	default y
+
+source "crypto/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
new file mode 100644
index 0000000..61653cb
--- /dev/null
+++ b/arch/powerpc/Kconfig.debug
@@ -0,0 +1,73 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config KGDB
+	bool "Include kgdb kernel debugger"
+	depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
+	select DEBUG_INFO
+	help
+	  Include in-kernel hooks for kgdb, the Linux kernel source level
+	  debugger.  See <http://kgdb.sourceforge.net/> for more information.
+	  Unless you are intending to debug the kernel, say N here.
+
+choice
+	prompt "Serial Port"
+	depends on KGDB
+	default KGDB_TTYS1
+
+config KGDB_TTYS0
+	bool "ttyS0"
+
+config KGDB_TTYS1
+	bool "ttyS1"
+
+config KGDB_TTYS2
+	bool "ttyS2"
+
+config KGDB_TTYS3
+	bool "ttyS3"
+
+endchoice
+
+config KGDB_CONSOLE
+	bool "Enable serial console thru kgdb port"
+	depends on KGDB && 8xx || CPM2
+	help
+	  If you enable this, all serial console messages will be sent
+	  over the gdb stub.
+	  If unsure, say N.
+
+config XMON
+	bool "Include xmon kernel debugger"
+	depends on DEBUG_KERNEL
+	help
+	  Include in-kernel hooks for the xmon kernel monitor/debugger.
+	  Unless you are intending to debug the kernel, say N here.
+
+config BDI_SWITCH
+	bool "Include BDI-2000 user context switcher"
+	depends on DEBUG_KERNEL
+	help
+	  Include in-kernel support for the Abatron BDI2000 debugger.
+	  Unless you are intending to debug the kernel with one of these
+	  machines, say N here.
+
+config BOOTX_TEXT
+	bool "Support for early boot text console (BootX or OpenFirmware only)"
+	depends on PPC_OF
+	help
+	  Say Y here to see progress messages from the boot firmware in text
+	  mode. Requires either BootX or Open Firmware.
+
+config SERIAL_TEXT_DEBUG
+	bool "Support for early boot texts over serial port"
+	depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
+		PPC_GEN550 || PPC_MPC52xx
+
+config PPC_OCP
+	bool
+	depends on IBM_OCP || XILINX_OCP
+	default y
+
+endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
new file mode 100644
index 0000000..92751ca
--- /dev/null
+++ b/arch/powerpc/Makefile
@@ -0,0 +1,211 @@
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Changes for PPC by Gary Thomas
+# Rewritten by Cort Dougan and Paul Mackerras
+#
+
+# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
+KERNELLOAD	:= $(CONFIG_KERNEL_START)
+
+HAS_BIARCH	:= $(call cc-option-yn, -m32)
+
+ifeq ($(CONFIG_PPC64),y)
+OLDARCH	:= ppc64
+SZ	:= 64
+
+# Set default 32 bits cross compilers for vdso and boot wrapper
+CROSS32_COMPILE ?=
+
+CROSS32CC		:= $(CROSS32_COMPILE)gcc
+CROSS32AS		:= $(CROSS32_COMPILE)as
+CROSS32LD		:= $(CROSS32_COMPILE)ld
+CROSS32OBJCOPY		:= $(CROSS32_COMPILE)objcopy
+
+ifeq ($(HAS_BIARCH),y)
+ifeq ($(CROSS32_COMPILE),)
+CROSS32CC	:= $(CC) -m32
+CROSS32AS	:= $(AS) -a32
+CROSS32LD	:= $(LD) -m elf32ppc
+CROSS32OBJCOPY	:= $(OBJCOPY)
+endif
+endif
+
+export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
+
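+# ppc64 function descriptors mean the real code entry points are the
+# synthesized ".function" symbols, which nm only lists when given
+# --synthetic; probe whether this nm supports that flag.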
+new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
+
+ifeq ($(new_nm),y)
+NM		:= $(NM) --synthetic
+endif
+
+else
+OLDARCH	:= ppc
+SZ	:= 32
+endif
+
+ifeq ($(HAS_BIARCH),y)
+override AS	+= -a$(SZ)
+override LD	+= -m elf$(SZ)ppc
+override CC	+= -m$(SZ)
+endif
+
+LDFLAGS_vmlinux	:= -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
+
+# The -Iarch/$(ARCH)/include is temporary while we are merging
+CPPFLAGS	+= -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS		+= -Iarch/$(ARCH)
+CFLAGS		+= -Iarch/$(ARCH) -msoft-float -pipe
+CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=none  -mcall-aixdesc
+CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 -mmultiple
+CFLAGS		+= $(CFLAGS-y)
+CPP		= $(CC) -E $(CFLAGS)
+# Temporary hack until we have migrated to asm-powerpc
+LINUXINCLUDE    += -Iarch/$(ARCH)/include
+
+CHECKFLAGS	+= -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
+
+ifeq ($(CONFIG_PPC64),y)
+GCC_VERSION     := $(call cc-version)
+GCC_BROKEN_VEC	:= $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
+
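+# gcc releases before 4.0 are taken to generate bad AltiVec code with
+# -mcpu=power4, so fall back to -mcpu=970 there (970 implies POWER4
+# plus AltiVec).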
+ifeq ($(CONFIG_POWER4_ONLY),y)
+ifeq ($(CONFIG_ALTIVEC),y)
+ifeq ($(GCC_BROKEN_VEC),y)
+	CFLAGS += $(call cc-option,-mcpu=970)
+else
+	CFLAGS += $(call cc-option,-mcpu=power4)
+endif
+else
+	CFLAGS += $(call cc-option,-mcpu=power4)
+endif
+else
+	CFLAGS += $(call cc-option,-mtune=power4)
+endif
+endif
+
+# Enable unit-at-a-time mode when possible. It shrinks the
+# kernel considerably.
+CFLAGS += $(call cc-option,-funit-at-a-time)
+
+ifndef CONFIG_FSL_BOOKE
+CFLAGS		+= -mstring
+endif
+
+cpu-as-$(CONFIG_PPC64BRIDGE)	+= -Wa,-mppc64bridge
+cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
+cpu-as-$(CONFIG_6xx)		+= -Wa,-maltivec
+cpu-as-$(CONFIG_POWER4)		+= -Wa,-maltivec
+cpu-as-$(CONFIG_E500)		+= -Wa,-me500
+cpu-as-$(CONFIG_E200)		+= -Wa,-me200
+
+AFLAGS += $(cpu-as-y)
+CFLAGS += $(cpu-as-y)
+
+# Default to the common case.
+KBUILD_DEFCONFIG := common_defconfig
+
+head-y				:= arch/powerpc/kernel/head.o
+head-$(CONFIG_PPC64)		:= arch/powerpc/kernel/head_64.o
+head-$(CONFIG_8xx)		:= arch/powerpc/kernel/head_8xx.o
+head-$(CONFIG_4xx)		:= arch/powerpc/kernel/head_4xx.o
+head-$(CONFIG_44x)		:= arch/powerpc/kernel/head_44x.o
+head-$(CONFIG_FSL_BOOKE)	:= arch/powerpc/kernel/head_fsl_booke.o
+
+ifeq ($(CONFIG_PPC32),y)
+head-$(CONFIG_6xx)		+= arch/powerpc/kernel/idle_6xx.o
+head-$(CONFIG_POWER4)		+= arch/powerpc/kernel/idle_power4.o
+head-$(CONFIG_PPC_FPU)		+= arch/powerpc/kernel/fpu.o
+endif
+
+core-y				+= arch/powerpc/kernel/ \
+				   arch/$(OLDARCH)/kernel/ \
+				   arch/powerpc/mm/ \
+				   arch/powerpc/lib/ \
+				   arch/powerpc/sysdev/ \
+				   arch/powerpc/platforms/
+core-$(CONFIG_PPC32)		+= arch/ppc/syslib/
+core-$(CONFIG_MATH_EMULATION)	+= arch/ppc/math-emu/
+core-$(CONFIG_XMON)		+= arch/powerpc/xmon/
+core-$(CONFIG_APUS)		+= arch/ppc/amiga/
+drivers-$(CONFIG_8xx)		+= arch/ppc/8xx_io/
+drivers-$(CONFIG_4xx)		+= arch/ppc/4xx_io/
+drivers-$(CONFIG_CPM2)		+= arch/ppc/8260_io/
+
+drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
+
+BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
+
+.PHONY: $(BOOT_TARGETS)
+
+all: uImage zImage
+
+CPPFLAGS_vmlinux.lds	:= -Upowerpc
+
+# All the instructions talk about "make bzImage".
+bzImage: zImage
+
+boot := arch/$(OLDARCH)/boot
+
+$(BOOT_TARGETS): vmlinux
+	$(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot) $@
+
+uImage: vmlinux
+	$(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot)/images $(boot)/images/$@
+
+define archhelp
+  @echo '* zImage          - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
+  @echo '  uImage          - Create a bootable image for U-Boot / PPCBoot'
+  @echo '  install         - Install kernel using'
+  @echo '                    (your) ~/bin/installkernel or'
+  @echo '                    (distribution) /sbin/installkernel or'
+  @echo '                    install to $$(INSTALL_PATH) and run lilo'
  @echo '  *_defconfig     - Select default config from arch/$(OLDARCH)/configs'
+endef
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+	# Temporary hack until we have migrated to asm-powerpc
+	$(Q)rm -rf arch/$(ARCH)/include
+
+archprepare: checkbin
+
+# Temporary hack until we have migrated to asm-powerpc
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+	$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+	$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+
+# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
+# to stdout and these checks are run even on install targets.
+TOUT	:= .tmp_gas_check
+# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
+# instructions.
+# gcc-3.4 and binutils-2.14 are a fatal combination.
+GCC_VERSION	:= $(call cc-version)
+
+checkbin:
+	@if test "$(GCC_VERSION)" = "0304" ; then \
+		if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
+			echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
+			echo 'correctly with gcc-3.4 and your version of binutils.'; \
+			echo '*** Please upgrade your binutils or downgrade your gcc'; \
+			false; \
+		fi ; \
+	fi
+	@if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
+		echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
+		echo 'correctly with old versions of binutils.' ; \
+		echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
+		false ; \
+	fi
+
+CLEAN_FILES += $(TOUT)
+
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 0000000..58c130b
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-$(CONFIG_PPC_STD_MMU)	:= head.o
+extra-$(CONFIG_PPC64)		:= head_64.o
+extra-$(CONFIG_40x)		:= head_4xx.o
+extra-$(CONFIG_44x)		:= head_44x.o
+extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
+extra-$(CONFIG_8xx)		:= head_8xx.o
+extra-$(CONFIG_6xx)		+= idle_6xx.o
+extra-$(CONFIG_POWER4)		+= idle_power4.o
+extra-$(CONFIG_PPC_FPU)		+= fpu.o
+extra-y				+= vmlinux.lds
+
+obj-y				:= semaphore.o traps.o process.o
+obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
+obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 0000000..16cf0b7
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,262 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/suspend.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/hardirq.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/lppaca.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/rtas.h>
+#include <asm/cache.h>
+#include <asm/systemcfg.h>
+#include <asm/compat.h>
+#endif
+
+#define DEFINE(sym, val) \
+	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
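+
+/*
+ * For example (a sketch of how the build consumes this file; the value
+ * shown is illustrative): DEFINE(THREAD_SIZE, THREAD_SIZE) makes the
+ * compiler emit an assembly line such as
+ *	->THREAD_SIZE 8192 THREAD_SIZE
+ * which kbuild post-processes into
+ *	#define THREAD_SIZE 8192
+ * in asm-offsets.h, for use by assembly sources such as fpu.S and head.S.
+ */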
+
+int main(void)
+{
+	/* thread struct on stack */
+	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#ifdef CONFIG_PPC32
+	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+#endif
+#ifdef CONFIG_PPC64
+	DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
+	DEFINE(THREAD_SHIFT, THREAD_SHIFT);
+#endif
+	DEFINE(THREAD_SIZE, THREAD_SIZE);
+
+	/* task_struct->thread */
+	DEFINE(THREAD, offsetof(struct task_struct, thread));
+	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(MM, offsetof(struct task_struct, mm));
+	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(KSP, offsetof(struct thread_struct, ksp));
+	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+	DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
+	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
+	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
+	DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
+	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
+	DEFINE(PT_PTRACED, PT_PTRACED);
+#endif
+#ifdef CONFIG_PPC64
+	DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
+#endif
+
+#ifdef CONFIG_ALTIVEC
+	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
+	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
+	DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
+	DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
+	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
+#endif /* CONFIG_SPE */
+	/* Interrupt register frame */
+	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+#ifndef CONFIG_PPC64
+	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#else
+	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+
+	/* 288 = # of volatile regs, int & fp, for leaf routines */
+	/* which do not stack a frame.  See the PPC64 ABI.       */
+	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
+#endif
+	/* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
+	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
+	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
+	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
+	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
+	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
+	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
+	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
+	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
+	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
+	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
+	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
+	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
+	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
+	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
+	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
+	DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
+	DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
+	DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
+	DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
+	DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
+	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
+	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
+	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
+	DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
+	DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
+	DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
+	DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
+	DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
+	DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
+	DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
+	DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
+	DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
+	/*
+	 * Note: these symbols include _ because they overlap with special
+	 * register names
+	 */
+	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
+	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
+	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
+	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
+	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
+	DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
+	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
+	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
+	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
+	/* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR
+	 * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
+	 * for such processors.  For critical interrupts we use them to
+	 * hold SRR0 and SRR1.
+	 */
+	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
+	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
+	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
+	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
+	DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+	DEFINE(CLONE_VM, CLONE_VM);
+	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
+	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+	/* About the CPU features table */
+	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
+	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
+	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
+	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
+	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+
+#ifdef CONFIG_PPC64
+	DEFINE(MM, offsetof(struct task_struct, mm));
+	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+
+	DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
+	DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
+	DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
+	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
+	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
+	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
+	DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
+
+	/* paca */
+	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
+	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
+	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
+	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
+	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
+	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
+	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
+	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
+	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
+#endif /* CONFIG_HUGETLB_PAGE */
+	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
+	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
+	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
+	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
+	DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
+	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+	DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
+	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
+	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
+	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+
+	/* RTAS */
+	DEFINE(RTASBASE, offsetof(struct rtas_t, base));
+	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
+
+	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
+
+	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
+	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
+	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
+
+	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
+	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
+	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
+
+	/* systemcfg offsets for use by vdso */
+	DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
+	DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
+	DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
+	DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
+	DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
+	DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
+	DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
+	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
+	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
+
+	/* timeval/timezone offsets for use by vdso */
+	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
+	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
+	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
+	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
+	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
+	DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+#endif
+
+	DEFINE(pbe_address, offsetof(struct pbe, address));
+	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+	DEFINE(pbe_next, offsetof(struct pbe, next));
+
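+	/* each 32-bit PowerPC segment register maps a 256MB (1 << 28) slice */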
+	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
+	return 0;
+}
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
new file mode 100644
index 0000000..665d7d3
--- /dev/null
+++ b/arch/powerpc/kernel/fpu.S
@@ -0,0 +1,133 @@
+/*
+ *  FPU support code, moved here from head.S so that it can be used
+ *  by chips which use other head-whatever.S files.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * This task wants to use the FPU now.
+ * On UP, disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Load up this task's FP registers from its thread_struct,
+ * enable the FPU for the current task and return to the task.
+ */
+	.globl	load_up_fpu
+load_up_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+	clrldi	r5,r5,1			/* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
+	SYNC
+	MTMSRD(r5)			/* enable use of fpu now */
+	isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_fpu in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)			/* get __pa constant */
+	addis	r3,r6,last_task_used_math@ha
+	lwz	r4,last_task_used_math@l(r3)
+	cmpwi	0,r4,0
+	beq	1f
+	add	r4,r4,r6
+	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
+	SAVE_32FPRS(0, r4)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r4)
+	lwz	r5,PT_REGS(r4)
+	add	r5,r5,r6
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r10,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r10		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	lwz	r4,THREAD_FPEXC_MODE(r5)
+	ori	r9,r9,MSR_FP		/* enable FP for current */
+	or	r9,r9,r4
+	lfd	fr0,THREAD_FPSCR-4(r5)
+	mtfsf	0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	sub	r4,r4,r6
+	stw	r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	b	fast_exception_return
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+	.globl	KernelFP
+KernelFP:
+	lwz	r3,_MSR(r1)
+	ori	r3,r3,MSR_FP
+	stw	r3,_MSR(r1)		/* enable use of FP after return */
+	lis	r3,86f@h
+	ori	r3,r3,86f@l
+	mr	r4,r2			/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
+	.align	4,0
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+	.globl	giveup_fpu
+giveup_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC_601
+	ISYNC_601
+	MTMSRD(r5)			/* enable use of fpu now */
+	SYNC_601
+	isync
+	cmpwi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD	        /* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpwi	0,r5,0
+	SAVE_32FPRS(0, r3)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r3)
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r3,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r3		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_math@ha
+	stw	r5,last_task_used_math@l(r4)
+#endif /* CONFIG_SMP */
+	blr
diff --git a/arch/powerpc/kernel/head.S b/arch/powerpc/kernel/head.S
new file mode 100644
index 0000000..8cdac73
--- /dev/null
+++ b/arch/powerpc/kernel/head.S
@@ -0,0 +1,1539 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC platform, including trap and interrupt dispatch.
+ *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_APUS
+#include <asm/amigappc.h>
+#endif
+
+#ifdef CONFIG_PPC64BRIDGE
+#define LOAD_BAT(n, reg, RA, RB)	\
+	ld	RA,(n*32)+0(reg);	\
+	ld	RB,(n*32)+8(reg);	\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_IBAT##n##L,RB;	\
+	ld	RA,(n*32)+16(reg);	\
+	ld	RB,(n*32)+24(reg);	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##L,RB;	\
+
+#else /* CONFIG_PPC64BRIDGE */
+
+/* The 601 only has IBATs; cr0.eq is set on 601 when using this macro */
+#define LOAD_BAT(n, reg, RA, RB)	\
+	/* see the comment for clear_bats() -- Cort */ \
+	li	RA,0;			\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	lwz	RA,(n*16)+0(reg);	\
+	lwz	RB,(n*16)+4(reg);	\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_IBAT##n##L,RB;	\
+	beq	1f;			\
+	lwz	RA,(n*16)+8(reg);	\
+	lwz	RB,(n*16)+12(reg);	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##L,RB;	\
+1:
+#endif /* CONFIG_PPC64BRIDGE */
+
+	.text
+	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
+	.stabs	"head.S",N_SO,0,0,0f
+0:
+	.globl	_stext
+_stext:
+
+/*
+ * _start is defined this way because the XCOFF loader in the OpenFirmware
+ * on the powermac expects the entry point to be a procedure descriptor.
+ */
+	.text
+	.globl	_start
+_start:
+	/*
+	 * These are here for legacy reasons, the kernel used to
+	 * need to look like a coff function entry for the pmac
+	 * but we're always started by some kind of bootloader now.
+	 *  -- Cort
+	 */
+	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
+	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
+	nop
+
+/* PMAC
+ * Enter here with the kernel text, data and bss loaded starting at
+ * 0, running with virtual == physical mapping.
+ * r5 points to the prom entry point (the client interface handler
+ * address).  Address translation is turned on, with the prom
+ * managing the hash table.  Interrupts are disabled.  The stack
+ * pointer (r1) points to just below the end of the half-meg region
+ * from 0x380000 - 0x400000, which is mapped in already.
+ *
+ * If we are booted from MacOS via BootX, we enter with the kernel
+ * image loaded somewhere, and the following values in registers:
+ *  r3: 'BooX' (0x426f6f58)
+ *  r4: virtual address of boot_infos_t
+ *  r5: 0
+ *
+ * APUS
+ *   r3: 'APUS'
+ *   r4: physical address of memory base
+ *   Linux/m68k style BootInfo structure at &_end.
+ *
+ * PREP
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader.  The expected layout
+ * of the regs is:
+ *   r3: ptr to residual data
+ *   r4: initrd_start or if no initrd then 0
+ *   r5: initrd_end - unused if r4 is 0
+ *   r6: Start of command line string
+ *   r7: End of command line string
+ *
+ * This just gets a minimal mmu environment setup so we can call
+ * start_here() to do the real work.
+ * -- Cort
+ */
+
+	.globl	__start
+__start:
+/*
+ * We have to do any OF calls before we map ourselves to KERNELBASE,
+ * because OF may have I/O devices mapped into that area
+ * (particularly on CHRP).
+ */
+	mr	r31,r3			/* save parameters */
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+	li	r24,0			/* cpu # */
+
+/*
+ * early_init() does the early machine identification and does
+ * the necessary low-level setup and clears the BSS
+ *  -- Cort <cort@fsmlabs.com>
+ */
+	bl	early_init
+
+/*
+ * On POWER4, we first need to tweak some CPU configuration registers
+ * like real mode cache inhibit or exception base
+ */
+#ifdef CONFIG_POWER4
+	bl	__970_cpu_preinit
+#endif /* CONFIG_POWER4 */
+
+#ifdef CONFIG_APUS
+/* On APUS the __va/__pa constants need to be set to the correct
+ * values before continuing.
+ */
+	mr	r4,r30
+	bl	fix_mem_constants
+#endif /* CONFIG_APUS */
+
+/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
+ * the physical address we are running at, returned by early_init()
+ */
+	bl	mmu_off
+__after_mmu_off:
+#ifndef CONFIG_POWER4
+	bl	clear_bats
+	bl	flush_tlbs
+
+	bl	initial_bats
+#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+	bl	setup_disp_bat
+#endif
+#else /* CONFIG_POWER4 */
+	bl	reloc_offset
+	bl	initial_mm_power4
+#endif /* CONFIG_POWER4 */
+
+/*
+ * Call setup_cpu for CPU 0 and initialize 6xx Idle
+ */
+	bl	reloc_offset
+	li	r24,0			/* cpu# */
+	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+	bl	reloc_offset
+	bl	init_idle_6xx
+#endif /* CONFIG_6xx */
+#ifdef CONFIG_POWER4
+	bl	reloc_offset
+	bl	init_idle_power4
+#endif /* CONFIG_POWER4 */
+
+
+#ifndef CONFIG_APUS
+/*
+ * We need to run with _start at physical address 0.
+ * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
+ * the exception vectors at 0 (and therefore this copy
+ * overwrites OF's exception vectors with our own).
+ * If the MMU is already turned on, we copy stuff to KERNELBASE,
+ * otherwise we copy it to 0.
+ */
+	bl	reloc_offset
+	mr	r26,r3
+	addis	r4,r3,KERNELBASE@h	/* current address of _start */
+	cmpwi	0,r4,0			/* are we already running at 0? */
+	bne	relocate_kernel
+#endif /* CONFIG_APUS */
+/*
+ * we now have the 1st 16M of ram mapped with the bats.
+ * prep needs the mmu to be turned on here, but pmac already has it on.
+ * this shouldn't bother the pmac since it just gets turned on again
+ * as we jump to our code at KERNELBASE. -- Cort
+ * Actually no, pmac doesn't have it on any more. BootX enters with MMU
+ * off, and in other cases, we now turn it off before changing BATs above.
+ */
+turn_on_mmu:
+	mfmsr	r0
+	ori	r0,r0,MSR_DR|MSR_IR
+	mtspr	SPRN_SRR1,r0
+	lis	r0,start_here@h
+	ori	r0,r0,start_here@l
+	mtspr	SPRN_SRR0,r0
+	SYNC
+	RFI				/* enables MMU */
+
+/*
+ * We need __secondary_hold as a place to hold the other cpus on
+ * an SMP machine, even when we are running a UP kernel.
+ */
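+/*
+ * The hold protocol: each secondary writes its number (r3) to address
+ * 4 to say it is here, then spins reading address 0 until the master
+ * stores that cpu number there, at which point it falls through to
+ * __secondary_start with the cpu number in r24.
+ */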
+	. = 0xc0			/* for prep bootloader */
+	li	r3,1			/* MTX only has 1 cpu */
+	.globl	__secondary_hold
+__secondary_hold:
+	/* tell the master we're here */
+	stw	r3,4(0)
+#ifdef CONFIG_SMP
+100:	lwz	r4,0(0)
+	/* wait until we're told to start */
+	cmpw	0,r4,r3
+	bne	100b
+	/* our cpu # was at addr 0 - go */
+	mr	r24,r3			/* cpu # */
+	b	__secondary_start
+#else
+	b	.
+#endif /* CONFIG_SMP */
+
+/*
+ * Exception entry code.  This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG	\
+	mtspr	SPRN_SPRG0,r10;	\
+	mtspr	SPRN_SPRG1,r11;	\
+	mfcr	r10;		\
+	EXCEPTION_PROLOG_1;	\
+	EXCEPTION_PROLOG_2
+
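+/*
+ * EXCEPTION_PROLOG_1 picks the exception stack: if we came from the
+ * kernel, carry on just below the existing stack pointer (r1); if we
+ * came from user mode, start at the top of the task's kernel stack,
+ * found through the thread_struct physical address kept in SPRG3.
+ */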
+#define EXCEPTION_PROLOG_1	\
+	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
+	andi.	r11,r11,MSR_PR;	\
+	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
+	beq	1f;		\
+	mfspr	r11,SPRN_SPRG3;	\
+	lwz	r11,THREAD_INFO-THREAD(r11);	\
+	addi	r11,r11,THREAD_SIZE;	\
+	tophys(r11,r11);	\
+1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
+
+
+#define EXCEPTION_PROLOG_2	\
+	CLR_TOP32(r11);		\
+	stw	r10,_CCR(r11);		/* save registers */ \
+	stw	r12,GPR12(r11);	\
+	stw	r9,GPR9(r11);	\
+	mfspr	r10,SPRN_SPRG0;	\
+	stw	r10,GPR10(r11);	\
+	mfspr	r12,SPRN_SPRG1;	\
+	stw	r12,GPR11(r11);	\
+	mflr	r10;		\
+	stw	r10,_LINK(r11);	\
+	mfspr	r12,SPRN_SRR0;	\
+	mfspr	r9,SPRN_SRR1;	\
+	stw	r1,GPR1(r11);	\
+	stw	r1,0(r11);	\
+	tovirt(r1,r11);			/* set new kernel sp */	\
+	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
+	MTMSRD(r10);			/* (except for mach check in rtas) */ \
+	stw	r0,GPR0(r11);	\
+	SAVE_4GPRS(3, r11);	\
+	SAVE_2GPRS(7, r11)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
+/*
+ * Exception vectors.
+ */
+#define EXCEPTION(n, label, hdlr, xfer)		\
+	. = n;					\
+label:						\
+	EXCEPTION_PROLOG;			\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
+	xfer(n, hdlr)
+
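+/*
+ * EXC_XFER_TEMPLATE saves the trap number into the frame and branches
+ * to the transfer function, with the C handler address and the return
+ * path stored as two .long words immediately after the bl; the
+ * transfer code fetches them through the link register.  The i##n
+ * label on that pair is what intercept_table and mol_trampoline
+ * refer to.
+ */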
+#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
+	li	r10,trap;					\
+	stw	r10,TRAP(r11);					\
+	li	r10,MSR_KERNEL;					\
+	copyee(r10, r9);					\
+	bl	tfer;						\
+i##n:								\
+	.long	hdlr;						\
+	.long	ret
+
+#define COPY_EE(d, s)		rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
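+/*
+ * EXC_XFER_STD goes through transfer_to_handler_full, which also
+ * saves the non-volatile GPRs; EXC_XFER_LITE skips that, and flags it
+ * by passing n+1 (an odd trap number).  The _EE variants copy MSR_EE
+ * from the interrupted context, so the handler runs with interrupts
+ * enabled only if they were enabled when the exception occurred.
+ */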
+#define EXC_XFER_STD(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
+			  ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
+			  ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
+			  ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)	\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
+			  ret_from_except)
+
+/* System reset */
+/* core99 pmac starts the secondary here by changing the vector, and
+   putting it back to what it was (UnknownException) when done.  */
+#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
+	. = 0x100
+	b	__secondary_start_gemini
+#else
+	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+#endif
+
+/* Machine check */
+/*
+ * On CHRP, this is complicated by the fact that we could get a
+ * machine check inside RTAS, and we have no guarantee that certain
+ * critical registers will have the values we expect.  The set of
+ * registers that might have bad values includes all the GPRs
+ * and all the BATs.  We indicate that we are in RTAS by putting
+ * a non-zero value, the address of the exception frame to use,
+ * in SPRG2.  The machine check handler checks SPRG2 and uses its
+ * value if it is non-zero.  If we ever needed to free up SPRG2,
+ * we could use a field in the thread_info or thread_struct instead.
+ * (Other exception handlers assume that r1 is a valid kernel stack
+ * pointer when we take an exception from supervisor mode.)
+ *	-- paulus.
+ */
+	. = 0x200
+	mtspr	SPRN_SPRG0,r10
+	mtspr	SPRN_SPRG1,r11
+	mfcr	r10
+#ifdef CONFIG_PPC_CHRP
+	mfspr	r11,SPRN_SPRG2
+	cmpwi	0,r11,0
+	bne	7f
+#endif /* CONFIG_PPC_CHRP */
+	EXCEPTION_PROLOG_1
+7:	EXCEPTION_PROLOG_2
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_CHRP
+	mfspr	r4,SPRN_SPRG2
+	cmpwi	cr1,r4,0
+	bne	cr1,1f
+#endif
+	EXC_XFER_STD(0x200, MachineCheckException)
+#ifdef CONFIG_PPC_CHRP
+1:	b	machine_check_in_rtas
+#endif
+
+/* Data access exception. */
+	. = 0x300
+#ifdef CONFIG_PPC64BRIDGE
+	b	DataAccess
+DataAccessCont:
+#else
+DataAccess:
+	EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
+	mfspr	r10,SPRN_DSISR
+	andis.	r0,r10,0xa470		/* weird error? */
+	bne	1f			/* if not, try to put a PTE */
+	mfspr	r4,SPRN_DAR		/* into the hash table */
+	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
+	bl	hash_page
+1:	stw	r10,_DSISR(r11)
+	mr	r5,r10
+	mfspr	r4,SPRN_DAR
+	EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on data access. */
+	. = 0x380
+	b	DataSegment
+#endif /* CONFIG_PPC64BRIDGE */
+
+/* Instruction access exception. */
+	. = 0x400
+#ifdef CONFIG_PPC64BRIDGE
+	b	InstructionAccess
+InstructionAccessCont:
+#else
+InstructionAccess:
+	EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
+	andis.	r0,r9,0x4000		/* no pte found? */
+	beq	1f			/* if so, try to put a PTE */
+	li	r3,0			/* into the hash table */
+	mr	r4,r12			/* SRR0 is fault address */
+	bl	hash_page
+1:	mr	r4,r12
+	mr	r5,r9
+	EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on instruction access. */
+	. = 0x480
+	b	InstructionSegment
+#endif /* CONFIG_PPC64BRIDGE */
+
+/* External interrupt */
+	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* Alignment exception */
+	. = 0x600
+Alignment:
+	EXCEPTION_PROLOG
+	mfspr	r4,SPRN_DAR
+	stw	r4,_DAR(r11)
+	mfspr	r5,SPRN_DSISR
+	stw	r5,_DSISR(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0x600, AlignmentException)
+
+/* Program check exception */
+	EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+
+/* Floating-point unavailable */
+	. = 0x800
+FPUnavailable:
+	EXCEPTION_PROLOG
+	bne	load_up_fpu		/* if from user, just load it up */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x800, KernelFP)
+
+/* Decrementer */
+	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+
+	EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+
+/* System call */
+	. = 0xc00
+SystemCall:
+	EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+/* Single step - not used on 601 */
+	EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
+	EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
+
+/*
+ * The Altivec unavailable trap is at 0x0f20.  Foo.
+ * We effectively remap it to 0x3000.
+ * We include an altivec unavailable exception vector even if
+ * not configured for Altivec, so that you can't panic a
+ * non-altivec kernel running on a machine with altivec just
+ * by executing an altivec instruction.
+ */
+	. = 0xf00
+	b	Trap_0f
+
+	. = 0xf20
+	b	AltiVecUnavailable
+
+Trap_0f:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0xf00, UnknownException)
+
+/*
+ * Handle TLB miss for instruction on 603/603e.
+ * Note: we get an alternate set of r0 - r3 to use automatically.
+ */
+	. = 0x1000
+InstructionTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_IMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	InstructionAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	InstructionAddressInvalid /* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed bit) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
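+	/* PP encoding here: 0 = supervisor RW / no user access,
+	 * 2 = read-write for both, 3 = read-only for both.  Kernel
+	 * pages therefore get PP=0; user pages get PP=2 only when both
+	 * _PAGE_RW and _PAGE_DIRTY are set, and PP=3 otherwise.
+	 */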
+	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	and	r1,r1,r2		/* writable if _RW and _DIRTY */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r1,r1,0xe14		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_IMISS
+	tlbli	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+InstructionAddressInvalid:
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
+	addis	r1,r1,0x2000
+	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
+	mtctr	r0		/* Restore CTR */
+	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
+	or	r2,r2,r1
+	mtspr	SPRN_SRR1,r2
+	mfspr	r1,SPRN_IMISS	/* Get failing address */
+	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
+	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
+	xor	r1,r1,r2
+	mtspr	SPRN_DAR,r1	/* Set fault address */
+	mfmsr	r0		/* Restore "normal" registers */
+	xoris	r0,r0,MSR_TGPR>>16
+	mtcrf	0x80,r3		/* Restore CR0 */
+	mtmsr	r0
+	b	InstructionAccess
+
+/*
+ * Handle TLB miss for DATA Load operation on 603/603e
+ */
+	. = 0x1100
+DataLoadTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_DMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	DataAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	DataAddressInvalid	/* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed bit) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	and	r1,r1,r2		/* writable if _RW and _DIRTY */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r1,r1,0xe14		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_DMISS
+	tlbld	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+DataAddressInvalid:
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
+	addis	r1,r1,0x2000
+	mtspr	SPRN_DSISR,r1
+	mtctr	r0		/* Restore CTR */
+	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
+	mtspr	SPRN_SRR1,r2
+	mfspr	r1,SPRN_DMISS	/* Get failing address */
+	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
+	beq	20f		/* Jump if big endian */
+	xori	r1,r1,3
+20:	mtspr	SPRN_DAR,r1	/* Set fault address */
+	mfmsr	r0		/* Restore "normal" registers */
+	xoris	r0,r0,MSR_TGPR>>16
+	mtcrf	0x80,r3		/* Restore CR0 */
+	mtmsr	r0
+	b	DataAccess
+
+/*
+ * Handle TLB miss for DATA Store on 603/603e
+ */
+	. = 0x1200
+DataStoreTLBMiss:
+/*
+ * r0:	stored ctr
+ * r1:	linux style pte ( later becomes ppc hardware pte )
+ * r2:	ptr to linux-style pte
+ * r3:	scratch
+ */
+	mfctr	r0
+	/* Get PTE (linux-style) and check access */
+	mfspr	r3,SPRN_DMISS
+	lis	r1,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r3,r1
+	mfspr	r2,SPRN_SPRG3
+	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
+	lwz	r2,PGDIR(r2)
+	blt+	112f
+	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
+	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
+	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
+112:	tophys(r2,r2)
+	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+	lwz	r2,0(r2)		/* get pmd entry */
+	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
+	beq-	DataAddressInvalid	/* return if no mapping */
+	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
+	lwz	r3,0(r2)		/* get linux-style pte */
+	andc.	r1,r1,r3		/* check access & ~permission */
+	bne-	DataAddressInvalid	/* return if access not permitted */
+	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
+	/*
+	 * NOTE! We are assuming this is not an SMP system, otherwise
+	 * we would need to update the pte atomically with lwarx/stwcx.
+	 */
+	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
+	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
+	li	r1,0xe15		/* clear out reserved bits and M */
+	andc	r1,r3,r1		/* PP = user? 2: 0 */
+	mtspr	SPRN_RPA,r1
+	mfspr	r3,SPRN_DMISS
+	tlbld	r3
+	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
+	mtcrf	0x80,r3
+	rfi
+
+#ifndef CONFIG_ALTIVEC
+#define AltivecAssistException	UnknownException
+#endif
+
+	EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
+	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
+	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+#ifdef CONFIG_POWER4
+	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
+	EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
+#else /* !CONFIG_POWER4 */
+	EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
+	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
+	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+#endif /* CONFIG_POWER4 */
+	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
+	EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)
+
+	.globl mol_trampoline
+	.set mol_trampoline, i0x2f00
+
+	. = 0x3000
+
+AltiVecUnavailable:
+	EXCEPTION_PROLOG
+#ifdef CONFIG_ALTIVEC
+	bne	load_up_altivec		/* if from user, just load it up */
+#endif /* CONFIG_ALTIVEC */
+	EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)
+
+#ifdef CONFIG_PPC64BRIDGE
+DataAccess:
+	EXCEPTION_PROLOG
+	b	DataAccessCont
+
+InstructionAccess:
+	EXCEPTION_PROLOG
+	b	InstructionAccessCont
+
+DataSegment:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	mfspr	r4,SPRN_DAR
+	stw	r4,_DAR(r11)
+	EXC_XFER_STD(0x380, UnknownException)
+
+InstructionSegment:
+	EXCEPTION_PROLOG
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_STD(0x480, UnknownException)
+#endif /* CONFIG_PPC64BRIDGE */
+
+#ifdef CONFIG_ALTIVEC
+/* Note that the AltiVec support is closely modeled after the FP
+ * support.  Changes to one are likely to be applicable to the
+ * other!  */
+load_up_altivec:
+/*
+ * Disable AltiVec for the task which had AltiVec previously,
+ * and save its AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ * On SMP we know the AltiVec units are free, since we give them up on
+ * every context switch.  -- Kumar
+ */
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+/*
+ * For SMP, we don't do lazy AltiVec switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)
+	addis	r3,r6,last_task_used_altivec@ha
+	lwz	r4,last_task_used_altivec@l(r3)
+	cmpwi	0,r4,0
+	beq	1f
+	add	r4,r4,r6
+	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
+	SAVE_32VRS(0,r10,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	lwz	r5,PT_REGS(r4)
+	add	r5,r5,r6
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_VEC@h
+	andc	r4,r4,r10	/* disable altivec for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of AltiVec after return */
+	oris	r9,r9,MSR_VEC@h
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r10,r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	sub	r4,r4,r6
+	stw	r4,last_task_used_altivec@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	b	fast_exception_return
+
+/*
+ * AltiVec unavailable trap from kernel - print a message, but let
+ * the task use AltiVec in the kernel until it returns to user mode.
+ */
+KernelAltiVec:
+	lwz	r3,_MSR(r1)
+	oris	r3,r3,MSR_VEC@h
+	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
+	lis	r3,87f@h
+	ori	r3,r3,87f@l
+	mr	r4,r2		/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
+	.align	4,0
+
+/*
+ * giveup_altivec(tsk)
+ * Disable AltiVec for the task given as the argument,
+ * and save the AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ */
+
+	.globl	giveup_altivec
+giveup_altivec:
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	SYNC
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+	cmpwi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpwi	0,r5,0
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4,THREAD_VSCR
+	stvx	vr0,r4,r3
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VEC@h
+	andc	r4,r4,r3		/* disable AltiVec for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_altivec@ha
+	stw	r5,last_task_used_altivec@l(r4)
+#endif /* CONFIG_SMP */
+	blr
+#endif /* CONFIG_ALTIVEC */
+
+/*
+ * This code is jumped to from the startup code to copy
+ * the kernel image to physical address 0.
+ */
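+/*
+ * We copy the first 0x4000 bytes (the exception vectors plus this
+ * code) first and jump into that copy, so the copy loop keeps running
+ * even when the rest of the copy overwrites the region we were
+ * originally executing from.
+ */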
+relocate_kernel:
+	addis	r9,r26,klimit@ha	/* fetch klimit */
+	lwz	r25,klimit@l(r9)
+	addis	r25,r25,-KERNELBASE@h
+	li	r3,0			/* Destination base address */
+	li	r6,0			/* Destination offset */
+	li	r5,0x4000		/* # bytes of memory to copy */
+	bl	copy_and_flush		/* copy the first 0x4000 bytes */
+	addi	r0,r3,4f@l		/* jump to the address of 4f */
+	mtctr	r0			/* in copy and do the rest. */
+	bctr				/* jump to the copy */
+4:	mr	r5,r25
+	bl	copy_and_flush		/* copy the rest */
+	b	turn_on_mmu
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ */
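+/*
+ * The copy proceeds one cache line at a time: each line is copied
+ * word by word, pushed to memory with dcbst, and the matching icache
+ * line invalidated with icbi so the copied code is safe to execute.
+ */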
+copy_and_flush:
+	addi	r5,r5,-4
+	addi	r6,r6,-4
+4:	li	r0,L1_CACHE_LINE_SIZE/4
+	mtctr	r0
+3:	addi	r6,r6,4			/* copy a cache line */
+	lwzx	r0,r6,r4
+	stwx	r0,r6,r3
+	bdnz	3b
+	dcbst	r6,r3			/* write it to memory */
+	sync
+	icbi	r6,r3			/* flush the icache line */
+	cmplw	0,r6,r5
+	blt	4b
+	sync				/* additional sync needed on g4 */
+	isync
+	addi	r5,r5,4
+	addi	r6,r6,4
+	blr
+
+#ifdef CONFIG_APUS
+/*
+ * On APUS the physical base address of the kernel is not known at compile
+ * time, which means the __pa/__va constants used are incorrect. The
+ * __init section records the virtual addresses of the instructions that
+ * use these constants, so all that has to be done is patch them before
+ * continuing the kernel boot.
+ *
+ * r4 = The physical address of the kernel base.
+ */
+fix_mem_constants:
+	mr	r10,r4
+	addis	r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
+	neg	r11,r10	                 /* phys_to_virt constant */
+
+	lis	r12,__vtop_table_begin@h
+	ori	r12,r12,__vtop_table_begin@l
+	add	r12,r12,r10	         /* table begin phys address */
+	lis	r13,__vtop_table_end@h
+	ori	r13,r13,__vtop_table_end@l
+	add	r13,r13,r10	         /* table end phys address */
+	subi	r12,r12,4
+	subi	r13,r13,4
+1:	lwzu	r14,4(r12)               /* virt address of instruction */
+	add     r14,r14,r10              /* phys address of instruction */
+	lwz     r15,0(r14)               /* instruction, now insert top */
+	rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
+	stw	r15,0(r14)               /* of instruction and restore. */
+	dcbst	r0,r14			 /* write it to memory */
+	sync
+	icbi	r0,r14			 /* flush the icache line */
+	cmpw	r12,r13
+	bne     1b
+	sync				/* additional sync needed on g4 */
+	isync
+
+/*
+ * Map the memory where the exception handlers will
+ * be copied to when hash constants have been patched.
+ */
+#ifdef CONFIG_APUS_FAST_EXCEPT
+	lis	r8,0xfff0
+#else
+	lis	r8,0
+#endif
+	ori	r8,r8,0x2		/* 128KB, supervisor */
+	mtspr	SPRN_DBAT3U,r8
+	mtspr	SPRN_DBAT3L,r8
+
+	lis	r12,__ptov_table_begin@h
+	ori	r12,r12,__ptov_table_begin@l
+	add	r12,r12,r10	         /* table begin phys address */
+	lis	r13,__ptov_table_end@h
+	ori	r13,r13,__ptov_table_end@l
+	add	r13,r13,r10	         /* table end phys address */
+	subi	r12,r12,4
+	subi	r13,r13,4
+1:	lwzu	r14,4(r12)               /* virt address of instruction */
+	add     r14,r14,r10              /* phys address of instruction */
+	lwz     r15,0(r14)               /* instruction, now insert top */
+	rlwimi  r15,r11,16,16,31         /* half of pv const in low half*/
+	stw	r15,0(r14)               /* of instruction and restore. */
+	dcbst	r0,r14			 /* write it to memory */
+	sync
+	icbi	r0,r14			 /* flush the icache line */
+	cmpw	r12,r13
+	bne     1b
+
+	sync				/* additional sync needed on g4 */
+	isync				/* No speculative loading until now */
+	blr
+
+/***********************************************************************
+ *  Please note that on APUS the exception handlers are located at the
+ *  physical address 0xfff00000. For this reason, the exception handlers
+ *  cannot use relative branches to access the code below.
+ ***********************************************************************/
+#endif /* CONFIG_APUS */
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_GEMINI
+	.globl	__secondary_start_gemini
+__secondary_start_gemini:
+        mfspr   r4,SPRN_HID0
+        ori     r4,r4,HID0_ICFI
+        li      r3,0
+        ori     r3,r3,HID0_ICE
+        andc    r4,r4,r3
+        mtspr   SPRN_HID0,r4
+        sync
+        b       __secondary_start
+#endif /* CONFIG_GEMINI */
+
+	.globl	__secondary_start_pmac_0
+__secondary_start_pmac_0:
+	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+	li	r24,0
+	b	1f
+	li	r24,1
+	b	1f
+	li	r24,2
+	b	1f
+	li	r24,3
+1:
+	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
+	   set to map the 0xf0000000 - 0xffffffff region */
+	mfmsr	r0
+	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
+	SYNC
+	mtmsr	r0
+	isync
+
+	.globl	__secondary_start
+__secondary_start:
+#ifdef CONFIG_PPC64BRIDGE
+	mfmsr	r0
+	clrldi	r0,r0,1			/* make sure it's in 32-bit mode */
+	SYNC
+	MTMSRD(r0)
+	isync
+#endif
+	/* Copy some CPU settings from CPU 0 */
+	bl	__restore_cpu_setup
+
+	lis	r3,-KERNELBASE@h
+	mr	r4,r24
+	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+	lis	r3,-KERNELBASE@h
+	bl	init_idle_6xx
+#endif /* CONFIG_6xx */
+#ifdef CONFIG_POWER4
+	lis	r3,-KERNELBASE@h
+	bl	init_idle_power4
+#endif /* CONFIG_POWER4 */
+
+	/* get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	tophys(r1,r1)
+	lwz	r1,secondary_ti@l(r1)
+	tophys(r2,r1)
+	lwz	r2,TI_TASK(r2)
+
+	/* stack */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	tophys(r3,r1)
+	stw	r0,0(r3)
+
+	/* load up the MMU */
+	bl	load_up_mmu
+
+	/* ptr to phys current thread */
+	tophys(r4,r2)
+	addi	r4,r4,THREAD	/* phys address of our thread_struct */
+	CLR_TOP32(r4)
+	mtspr	SPRN_SPRG3,r4
+	li	r3,0
+	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
+
+	/* enable MMU and jump to start_secondary */
+	li	r4,MSR_KERNEL
+	FIX_SRR1(r4,r5)
+	lis	r3,start_secondary@h
+	ori	r3,r3,start_secondary@l
+	mtspr	SPRN_SRR0,r3
+	mtspr	SPRN_SRR1,r4
+	SYNC
+	RFI
+#endif /* CONFIG_SMP */
+
+/*
+ * These generic dummy functions are kept for CPUs not
+ * covered by CONFIG_6xx or CONFIG_POWER4
+ */
+#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
+_GLOBAL(__save_cpu_setup)
+	blr
+_GLOBAL(__restore_cpu_setup)
+	blr
+#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */
+
+
+/*
+ * Load the MMU state: SDR1 (the hash table base & size), the segment
+ * registers, and (except on POWER4) the BATs set up by MMU_init.
+ * Intended to be called with IR=0 and DR=0.
+ */
+load_up_mmu:
+	sync			/* Force all PTE updates to finish */
+	isync
+	tlbia			/* Clear all TLB entries */
+	sync			/* wait for tlbia/tlbie to finish */
+	TLBSYNC			/* ... on all CPUs */
+	/* Load the SDR1 register (hash table base & size) */
+	lis	r6,_SDR1@ha
+	tophys(r6,r6)
+	lwz	r6,_SDR1@l(r6)
+	mtspr	SPRN_SDR1,r6
+#ifdef CONFIG_PPC64BRIDGE
+	/* clear the ASR so we only use the pseudo-segment registers. */
+	li	r6,0
+	mtasr	r6
+#endif /* CONFIG_PPC64BRIDGE */
+	li	r0,16		/* load up segment register values */
+	mtctr	r0		/* for context 0 */
+	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
+	li	r4,0
+3:	mtsrin	r3,r4
+	addi	r3,r3,0x111	/* increment VSID */
+	addis	r4,r4,0x1000	/* address of next segment */
+	bdnz	3b
+#ifndef CONFIG_POWER4
+/* Load the BAT registers with the values set up by MMU_init.
+   MMU_init takes care of whether we're on a 601 or not. */
+	mfpvr	r3
+	srwi	r3,r3,16
+	cmpwi	r3,1
+	lis	r3,BATS@ha
+	addi	r3,r3,BATS@l
+	tophys(r3,r3)
+	LOAD_BAT(0,r3,r4,r5)
+	LOAD_BAT(1,r3,r4,r5)
+	LOAD_BAT(2,r3,r4,r5)
+	LOAD_BAT(3,r3,r4,r5)
+#endif /* CONFIG_POWER4 */
+	blr
+
+/*
+ * This is where the main kernel code starts.
+ */
+start_here:
+	/* ptr to current */
+	lis	r2,init_task@h
+	ori	r2,r2,init_task@l
+	/* Set up for using our exception vectors */
+	/* ptr to phys current thread */
+	tophys(r4,r2)
+	addi	r4,r4,THREAD	/* init task's THREAD */
+	CLR_TOP32(r4)
+	mtspr	SPRN_SPRG3,r4
+	li	r3,0
+	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
+
+	/* stack */
+	lis	r1,init_thread_union@ha
+	addi	r1,r1,init_thread_union@l
+	li	r0,0
+	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+/*
+ * Do early bootinfo parsing, platform-specific initialization,
+ * and set up the MMU.
+ */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+	bl	machine_init
+	bl	MMU_init
+
+#ifdef CONFIG_APUS
+	/* Copy exception code to exception vector base on APUS. */
+	lis	r4,KERNELBASE@h
+#ifdef CONFIG_APUS_FAST_EXCEPT
+	lis	r3,0xfff0		/* Copy to 0xfff00000 */
+#else
+	lis	r3,0			/* Copy to 0x00000000 */
+#endif
+	li	r5,0x4000		/* # bytes of memory to copy */
+	li	r6,0
+	bl	copy_and_flush		/* copy the first 0x4000 bytes */
+#endif  /* CONFIG_APUS */
+
+/*
+ * Go back to running unmapped so we can load up new values
+ * for SDR1 (hash table pointer) and the segment registers
+ * and change to using our exception vectors.
+ */
+	lis	r4,2f@h
+	ori	r4,r4,2f@l
+	tophys(r4,r4)
+	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+	FIX_SRR1(r3,r5)
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	SYNC
+	RFI
+/* Load up the kernel context */
+2:	bl	load_up_mmu
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Add helper information for the Abatron bdiGDB debugger.
+	 * We do this here because we know the mmu is disabled, and
+	 * will be enabled for real in just a few instructions.
+	 */
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
+	tophys(r5, r5)
+	stw	r6, 0(r5)
+#endif /* CONFIG_BDI_SWITCH */
+
+/* Now turn on the MMU for real! */
+	li	r4,MSR_KERNEL
+	FIX_SRR1(r4,r5)
+	lis	r3,start_kernel@h
+	ori	r3,r3,start_kernel@l
+	mtspr	SPRN_SRR0,r3
+	mtspr	SPRN_SRR1,r4
+	SYNC
+	RFI
+
+/*
+ * Set up the segment registers for a new context.  The context
+ * number is multiplied by a skew factor of 897 so that successive
+ * contexts get well-spread VSIDs; each of the 16 segments within a
+ * context then gets its own VSID by stepping in increments of 0x111.
+ */
+_GLOBAL(set_context)
+	mulli	r3,r3,897	/* multiply context by skew factor */
+	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
+	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
+	li	r0,NUM_USER_SEGMENTS
+	mtctr	r0
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is passed as second argument.
+	 */
+	lis	r5, KERNELBASE@h
+	lwz	r5, 0xf0(r5)
+	stw	r4, 0x4(r5)
+#endif
+	li	r4,0
+	isync
+3:
+#ifdef CONFIG_PPC64BRIDGE
+	slbie	r4
+#endif /* CONFIG_PPC64BRIDGE */
+	mtsrin	r3,r4
+	addi	r3,r3,0x111	/* next VSID */
+	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
+	addis	r4,r4,0x1000	/* address of next segment */
+	bdnz	3b
+	sync
+	isync
+	blr
+
+/*
+ * An undocumented "feature" of 604e requires that the v bit
+ * be cleared before changing BAT values.
+ *
+ * Also, newer IBM firmware does not clear BAT3 and BAT4, so
+ * this makes sure it's done.
+ *  -- Cort
+ */
+clear_bats:
+	li	r10,0
+	mfspr	r9,SPRN_PVR
+	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
+	cmpwi	r9, 1
+	beq	1f
+
+	mtspr	SPRN_DBAT0U,r10
+	mtspr	SPRN_DBAT0L,r10
+	mtspr	SPRN_DBAT1U,r10
+	mtspr	SPRN_DBAT1L,r10
+	mtspr	SPRN_DBAT2U,r10
+	mtspr	SPRN_DBAT2L,r10
+	mtspr	SPRN_DBAT3U,r10
+	mtspr	SPRN_DBAT3L,r10
+1:
+	mtspr	SPRN_IBAT0U,r10
+	mtspr	SPRN_IBAT0L,r10
+	mtspr	SPRN_IBAT1U,r10
+	mtspr	SPRN_IBAT1L,r10
+	mtspr	SPRN_IBAT2U,r10
+	mtspr	SPRN_IBAT2L,r10
+	mtspr	SPRN_IBAT3U,r10
+	mtspr	SPRN_IBAT3L,r10
+BEGIN_FTR_SECTION
+	/* Here's a tweak: at this point, CPU setup has
+	 * not been called yet, so HIGH_BAT_EN may not be
+	 * set in HID0 for the 745x processors. However, it
+	 * seems that doesn't affect our ability to actually
+	 * write to these SPRs.
+	 */
+	mtspr	SPRN_DBAT4U,r10
+	mtspr	SPRN_DBAT4L,r10
+	mtspr	SPRN_DBAT5U,r10
+	mtspr	SPRN_DBAT5L,r10
+	mtspr	SPRN_DBAT6U,r10
+	mtspr	SPRN_DBAT6L,r10
+	mtspr	SPRN_DBAT7U,r10
+	mtspr	SPRN_DBAT7L,r10
+	mtspr	SPRN_IBAT4U,r10
+	mtspr	SPRN_IBAT4L,r10
+	mtspr	SPRN_IBAT5U,r10
+	mtspr	SPRN_IBAT5L,r10
+	mtspr	SPRN_IBAT6U,r10
+	mtspr	SPRN_IBAT6L,r10
+	mtspr	SPRN_IBAT7U,r10
+	mtspr	SPRN_IBAT7L,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+	blr
+
+flush_tlbs:
+	lis	r10, 0x40
+1:	addic.	r10, r10, -0x1000
+	tlbie	r10
+	blt	1b
+	sync
+	blr
+
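+/*
+ * Turn the MMU off if it is on.  r3 holds the physical address we are
+ * running at (from early_init); we come back at __after_mmu_off, using
+ * an SRR0/SRR1 + RFI pair so that translation is switched off
+ * atomically.
+ */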
+mmu_off:
+ 	addi	r4, r3, __after_mmu_off - _start
+	mfmsr	r3
+	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
+	beqlr
+	andc	r3,r3,r0
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	sync
+	RFI
+
+#ifndef CONFIG_POWER4
+/*
+ * Use the first pair of BAT registers to map the 1st 16MB
+ * of RAM to KERNELBASE.  From this point on we can't safely
+ * call OF any more.
+ */
+initial_bats:
+	lis	r11,KERNELBASE@h
+#ifndef CONFIG_PPC64BRIDGE
+	mfspr	r9,SPRN_PVR
+	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
+	cmpwi	0,r9,1
+	bne	4f
+	ori	r11,r11,4		/* set up BAT registers for 601 */
+	li	r8,0x7f			/* valid, block length = 8MB */
+	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
+	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
+	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
+	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
+	mtspr	SPRN_IBAT1U,r9
+	mtspr	SPRN_IBAT1L,r10
+	isync
+	blr
+#endif /* CONFIG_PPC64BRIDGE */
+
+4:	tophys(r8,r11)
+#ifdef CONFIG_SMP
+	ori	r8,r8,0x12		/* R/W access, M=1 */
+#else
+	ori	r8,r8,2			/* R/W access */
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_APUS
+	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
+#else
+	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
+#endif /* CONFIG_APUS */
+
+#ifdef CONFIG_PPC64BRIDGE
+	/* clear out the high 32 bits in the BAT */
+	clrldi	r11,r11,32
+	clrldi	r8,r8,32
+#endif /* CONFIG_PPC64BRIDGE */
+	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
+	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
+	mtspr	SPRN_IBAT0L,r8
+	mtspr	SPRN_IBAT0U,r11
+	isync
+	blr
+
+#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+setup_disp_bat:
+	/*
+	 * setup the display bat prepared for us in prom.c
+	 */
+	mflr	r8
+	bl	reloc_offset
+	mtlr	r8
+	addis	r8,r3,disp_BAT@ha
+	addi	r8,r8,disp_BAT@l
+	lwz	r11,0(r8)
+	lwz	r8,4(r8)
+	mfspr	r9,SPRN_PVR
+	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
+	cmpwi	0,r9,1
+	beq	1f
+	mtspr	SPRN_DBAT3L,r8
+	mtspr	SPRN_DBAT3U,r11
+	blr
+1:	mtspr	SPRN_IBAT3L,r8
+	mtspr	SPRN_IBAT3U,r11
+	blr
+
+#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
+
+#else /* CONFIG_POWER4 */
+/*
+ * Load up the SDR1 and segment register values now
+ * since we don't have the BATs.
+ * Also make sure we are running in 32-bit mode.
+ */
+
+initial_mm_power4:
+	addis	r14,r3,_SDR1@ha		/* get the value from _SDR1 */
+	lwz	r14,_SDR1@l(r14)	/* assume hash table below 4GB */
+	mtspr	SPRN_SDR1,r14
+	slbia
+	lis	r4,0x2000		/* set pseudo-segment reg 12 */
+	ori	r5,r4,0x0ccc
+	mtsr	12,r5
+#if 0
+	ori	r5,r4,0x0888		/* set pseudo-segment reg 8 */
+	mtsr	8,r5			/* (for access to serial port) */
+#endif
+#ifdef CONFIG_BOOTX_TEXT
+	ori	r5,r4,0x0999		/* set pseudo-segment reg 9 */
+	mtsr	9,r5			/* (for access to screen) */
+#endif
+	mfmsr	r0
+	clrldi	r0,r0,1
+	sync
+	mtmsr	r0
+	isync
+	blr
+
+#endif /* CONFIG_POWER4 */
+
+#ifdef CONFIG_8260
+/* Jump into the system reset handler in the ROM.
+ * We first disable the MMU, and then jump to the ROM reset address.
+ *
+ * r3 is the board info structure, r4 is the location for starting.
+ * I use this for building a small kernel that can load other kernels,
+ * rather than trying to write, or rely on, a ROM monitor that can tftp load.
+ */
+       .globl  m8260_gorom
+m8260_gorom:
+	mfmsr	r0
+	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
+	sync
+	mtmsr	r0
+	sync
+	mfspr	r11, SPRN_HID0
+	lis	r10, 0
+	ori	r10,r10,HID0_ICE|HID0_DCE
+	andc	r11, r11, r10
+	mtspr	SPRN_HID0, r11
+	isync
+	li	r5, MSR_ME|MSR_RI
+	lis	r6,2f@h
+	addis	r6,r6,-KERNELBASE@h
+	ori	r6,r6,2f@l
+	mtspr	SPRN_SRR0,r6
+	mtspr	SPRN_SRR1,r5
+	isync
+	sync
+	rfi
+2:
+	mtlr	r4
+	blr
+#endif
+
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
+	.data
+	.globl	sdata
+sdata:
+	.globl	empty_zero_page
+empty_zero_page:
+	.space	4096
+
+	.globl	swapper_pg_dir
+swapper_pg_dir:
+	.space	4096
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap,
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+	.globl	cmd_line
+cmd_line:
+	.space	512
+
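+/*
+ * Table of the i##n descriptors generated by EXC_XFER_TEMPLATE,
+ * indexed by exception vector / 0x100; a zero entry means that vector
+ * has no descriptor to intercept.
+ */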
+	.globl intercept_table
+intercept_table:
+	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
+	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
+	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
+	.long 0, 0, 0, 0, 0, 0, 0, 0
+	.long 0, 0, 0, 0, 0, 0, 0, 0
+	.long 0, 0, 0, 0, 0, 0, 0, 0
+
+/* Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+	.space	8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644
index 0000000..599245b
--- /dev/null
+++ b/arch/powerpc/kernel/head_44x.S
@@ -0,0 +1,778 @@
+/*
+ * arch/powerpc/kernel/head_44x.S
+ *
+ * Kernel execution entry point code.
+ *
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *	PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ * 	Author: MontaVista Software, Inc.
+ *         	frank_rowand@mvista.com or source@mvista.com
+ * 	   	debbie_chu@mvista.com
+ *    Copyright 2002-2005 MontaVista Software, Inc.
+ *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/ibm4xx.h>
+#include <asm/ibm44x.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include "head_booke.h"
+
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=128")
+ *   r7 - End of kernel command line string
+ *
+ */
+	.text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+	/*
+	 * Reserve a word at a fixed location to store the address
+	 * of abatron_pteptrs
+	 */
+	nop
+/*
+ * Save parameters we are passed
+ */
+	mr	r31,r3
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+	li	r24,0		/* CPU number */
+
+/*
+ * Set up the initial MMU state
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *	 determine which physical region we are located
+ *	 in.  This can be used to determine where in RAM
+ *	 (on a shared CPU system) or PCI memory space
+ *	 (on a DRAMless system) we are located.
+ *       For now, we assume a perfect world which means
+ *	 we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	wmmucr				/* If not, leave STS=0 */
+	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
+wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	bl	invstr				/* Find our address */
+invstr:	mflr	r5				/* Make it accessible */
+	tlbsx	r23,0,r5			/* Find entry we are in */
+	li	r4,0				/* Start at TLB entry 0 */
+	li	r3,0				/* Set PAGEID inval value */
+1:	cmpw	r23,r4				/* Is this our entry? */
+	beq	skpinv				/* If so, skip the inval */
+	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
+skpinv:	addi	r4,r4,1				/* Increment */
+	cmpwi	r4,64				/* Are we done? */
+	bne	1b				/* If not, repeat */
+	isync					/* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
+	ori	r3,r3,KERNELBASE@l
+
+	/* Kernel is at the base of RAM */
+	li r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+ 	/* pageid fields */
+	clrrwi	r3,r3,10		/* Mask off the effective page number */
+	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+	/* xlat fields */
+	clrrwi	r4,r4,10		/* Mask off the real page number */
+					/* ERPN is 0 for first 4GB page */
+
+	/* attrib fields */
+	/* Added guarded bit to protect against speculative loads/stores */
+	li	r5,0
+	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+        li      r0,63                    /* TLB slot 63 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* If necessary, invalidate original entry we used */
+3:	cmpwi	r23,63
+	beq	4f
+	li	r6,0
+	tlbwe   r6,r23,PPC44x_TLB_PAGEID
+	isync
+
+4:
+#ifdef CONFIG_SERIAL_TEXT_DEBUG
+	/*
+	 * Add temporary UART mapping for early debug.
+	 * We can map UART registers wherever we want as long as they don't
+	 * interfere with other system mappings (e.g. with pinned entries).
+	 * For an example of how we handle this - see ocotea.h.       --ebs
+	 */
+ 	/* pageid fields */
+	lis	r3,UART0_IO_BASE@h
+	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
+
+	/* xlat fields */
+	lis	r4,UART0_PHYS_IO_BASE@h		/* RPN depends on SoC */
+#ifndef CONFIG_440EP
+	ori	r4,r4,0x0001		/* ERPN is 1 for second 4GB page */
+#endif
+
+	/* attrib fields */
+	li	r5,0
+	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
+
+        li      r0,0                    /* TLB slot 0 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_SERIAL_TEXT_DEBUG */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheck);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError);
+	SET_IVOR(14, InstructionTLBError);
+	SET_IVOR(15, Debug);
+
+	/* Establish the interrupt vector base */
+	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+	mtspr	SPRN_IVPR,r4
+
+#ifdef CONFIG_440EP
+	/* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
+	mfspr	r2,SPRN_CCR0
+	lis	r3,0xffef
+	ori	r3,r3,0xffff
+	and	r2,r2,r3
+	mtspr	SPRN_CCR0,r2
+	isync
+#endif
+
+	/*
+	 * This is where the main kernel code starts.
+	 */
+
+	/* ptr to current */
+	lis	r2,init_task@h
+	ori	r2,r2,init_task@l
+
+	/* ptr to current thread */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	/* stack */
+	lis	r1,init_thread_union@h
+	ori	r1,r1,init_thread_union@l
+	li	r0,0
+	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+	bl	early_init
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+	bl	machine_init
+	bl	MMU_init
+
+	/* Setup PTE pointers for the Abatron bdiGDB */
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	lis	r4, KERNELBASE@h
+	ori	r4, r4, KERNELBASE@l
+	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
+	stw	r6, 0(r5)
+
+	/* Let's move on */
+	lis	r4,start_kernel@h
+	ori	r4,r4,start_kernel@l
+	lis	r3,MSR_KERNEL@h
+	ori	r3,r3,MSR_KERNEL@l
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	rfi			/* change context and jump to start_kernel */
+
+/*
+ * Interrupt vector entry code
+ *
+ * The Book E MMUs are always on so we don't need to handle
+ * interrupts in real mode as with previous PPC processors. In
+ * this case we handle interrupts in the kernel virtual address
+ * space.
+ *
+ * Interrupt vectors are dynamically placed relative to the
+ * interrupt prefix as determined by the address of interrupt_base.
+ * The interrupt vectors offsets are programmed using the labels
+ * for each interrupt vector entry.
+ *
+ * Interrupt vectors must be aligned on a 16 byte boundary.
+ * We align on a 32 byte cache line boundary for good measure.
+ */
+
+interrupt_base:
+	/* Critical Input Interrupt */
+	CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+
+	/* Machine Check Interrupt */
+#ifdef CONFIG_440A
+	MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+#else
+	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+#endif
+
+	/* Data Storage Interrupt */
+	START_EXCEPTION(DataStorage)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+
+	/*
+	 * Check if it was a store fault; if not, bail,
+	 * because a user tried to access a kernel or
+	 * read-protected page.  Otherwise, get the
+	 * offending address and handle it.
+	 */
+	mfspr	r10, SPRN_ESR
+	andis.	r10, r10, ESR_ST@h
+	beq	2f
+
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+
+	mfspr   r12,SPRN_MMUCR
+	rlwinm	r12,r12,0,0,23		/* Clear TID */
+
+	b	4f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+
+	/* Load PID into MMUCR TID */
+	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
+	mfspr   r13,SPRN_PID		/* Get PID */
+	rlwimi	r12,r13,0,24,31		/* Set TID */
+
+4:
+	mtspr   SPRN_MMUCR,r12
+
+	rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
+	lwzx    r11, r12, r11           /* Get pgd/pmd entry */
+	rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
+	beq     2f                      /* Bail if no table */
+
+	rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
+	lwz     r11, 4(r12)             /* Get pte entry */
+
+	andi.	r13, r11, _PAGE_RW	/* Is it writeable? */
+	beq	2f			/* Bail if not */
+
+	/* Update 'changed'.
+	*/
+	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+	stw	r11, 4(r12)		/* Update Linux page table */
+
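+	/* Build the TLB access bits: the supervisor rights come straight
+	 * from the Linux PTE (SR always, SW from _PAGE_RW, SX from
+	 * _PAGE_HWEXEC), while the user UR/UW/UX bits are granted only
+	 * when _PAGE_USER is set as well.
+	 */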
+	li	r13, PPC44x_TLB_SR@l	/* Set SR */
+	rlwimi	r13, r11, 29, 29, 29	/* SX = _PAGE_HWEXEC */
+	rlwimi	r13, r11, 0, 30, 30	/* SW = _PAGE_RW */
+	rlwimi	r13, r11, 29, 28, 28	/* UR = _PAGE_USER */
+	rlwimi	r12, r11, 31, 26, 26	/* (_PAGE_USER>>1)->r12 */
+	rlwimi	r12, r11, 29, 30, 30	/* (_PAGE_USER>>3)->r12 */
+	and	r12, r12, r11		/* HWEXEC/RW & USER */
+	rlwimi	r13, r12, 0, 26, 26	/* UX = HWEXEC & USER */
+	rlwimi	r13, r12, 3, 27, 27	/* UW = RW & USER */
+
+	rlwimi	r11,r13,0,26,31		/* Insert static perms */
+
+	rlwinm	r11,r11,0,20,15		/* Clear U0-U3 */
+
+	/* find the TLB index that caused the fault.  It has to be here. */
+	tlbsx	r10, 0, r10
+
+	tlbwe	r11, r10, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
+
+	/* Done...restore registers and get out of here.
+	*/
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	rfi			/* Force context change */
+
+2:
+	/*
+	 * The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	data_access
+
+	/* Instruction Storage Interrupt */
+	INSTRUCTION_STORAGE_EXCEPTION
+
+	/* External Input Interrupt */
+	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+
+	/* Alignment Interrupt */
+	ALIGNMENT_EXCEPTION
+
+	/* Program Interrupt */
+	PROGRAM_EXCEPTION
+
+	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
+	EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
+
+	/* System Call Interrupt */
+	START_EXCEPTION(SystemCall)
+	NORMAL_EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+
+	/* Auxiliary Processor Unavailable Interrupt */
+	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+
+	/* Decrementer Interrupt */
+	DECREMENTER_EXCEPTION
+
+	/* Fixed Interval Timer Interrupt */
+	/* TODO: Add FIT support */
+	EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+
+	/* Watchdog Timer Interrupt */
+	/* TODO: Add watchdog support */
+#ifdef CONFIG_BOOKE_WDT
+	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+#else
+	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException)
+#endif
+
+	/* Data TLB Error Interrupt */
+	START_EXCEPTION(DataTLBError)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+
+	mfspr	r12,SPRN_MMUCR
+	rlwinm	r12,r12,0,0,23		/* Clear TID */
+
+	b	4f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+
+	/* Load PID into MMUCR TID */
+	mfspr	r12,SPRN_MMUCR
+	mfspr   r13,SPRN_PID		/* Get PID */
+	rlwimi	r12,r13,0,24,31		/* Set TID */
+
+4:
+	mtspr	SPRN_MMUCR,r12
+
+	rlwinm 	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
+	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
+	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
+	beq	2f			/* Bail if no table */
+
+	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
+	lwz	r11, 4(r12)		/* Get pte entry */
+	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+	beq	2f			/* Bail if not present */
+
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, 4(r12)
+
+	 /* Jump to common tlb load */
+	b	finish_tlb_load
+
+2:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	data_access
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bail out
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+	mfspr	r10, SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+
+	mfspr	r12,SPRN_MMUCR
+	rlwinm	r12,r12,0,0,23		/* Clear TID */
+
+	b	4f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+
+	/* Load PID into MMUCR TID */
+	mfspr	r12,SPRN_MMUCR
+	mfspr   r13,SPRN_PID		/* Get PID */
+	rlwimi	r12,r13,0,24,31		/* Set TID */
+
+4:
+	mtspr	SPRN_MMUCR,r12
+
+	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
+	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
+	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
+	beq	2f			/* Bail if no table */
+
+	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
+	lwz	r11, 4(r12)		/* Get pte entry */
+	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+	beq	2f			/* Bail if not present */
+
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, 4(r12)
+
+	/* Jump to common TLB load point */
+	b	finish_tlb_load
+
+2:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	InstructionStorage
+
+	/* Debug Interrupt */
+	DEBUG_EXCEPTION
+
+/*
+ * Local functions
+ */
+	/*
+	 * Data TLB exceptions will bail out to this point
+	 * if they can't resolve the lightweight TLB fault.
+	 */
+data_access:
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+	stw	r5,_ESR(r11)
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * 	r10 - EA of fault
+ * 	r11 - available to use
+ *	r12 - Pointer to the 64-bit PTE
+ *	r13 - available to use
+ *	MMUCR - loaded with proper value when we get here
+ *	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load:
+	/*
+	 * We set execute, because we don't have the granularity to
+	 * properly set this at the page level (Linux problem).
+	 * If shared is set, we cause a zero PID->TID load.
+	 * Many of these bits are software only.  Bits we don't set
+	 * here we assume (as we properly should) already have the
+	 * appropriate value.
+	 */
+
+	/* Load the next available TLB index */
+	lis	r13, tlb_44x_index@ha
+	lwz	r13, tlb_44x_index@l(r13)
+	/* Load the TLB high watermark */
+	lis	r11, tlb_44x_hwater@ha
+	lwz	r11, tlb_44x_hwater@l(r11)
+
+	/* Increment, rollover, and store TLB index */
+	addi	r13, r13, 1
+	cmpw	0, r13, r11			/* reserve entries */
+	ble	7f
+	li	r13, 0
+7:
+	/* Store the next available TLB index */
+	lis	r11, tlb_44x_index@ha
+	stw	r13, tlb_44x_index@l(r11)
+
+	lwz	r11, 0(r12)			/* Get MS word of PTE */
+	lwz	r12, 4(r12)			/* Get LS word of PTE */
+	rlwimi	r11, r12, 0, 0, 19		/* Insert RPN */
+	tlbwe	r11, r13, PPC44x_TLB_XLAT	/* Write XLAT */
+
+	/*
+	 * Create PAGEID. This is the faulting address,
+	 * page size, and valid flag.
+	 */
+	li	r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
+	rlwimi	r10, r11, 0, 20, 31		/* Insert valid and page size */
+	tlbwe	r10, r13, PPC44x_TLB_PAGEID	/* Write PAGEID */
+
+	li	r10, PPC44x_TLB_SR@l		/* Set SR */
+	rlwimi	r10, r12, 0, 30, 30		/* Set SW = _PAGE_RW */
+	rlwimi	r10, r12, 29, 29, 29		/* SX = _PAGE_HWEXEC */
+	rlwimi	r10, r12, 29, 28, 28		/* UR = _PAGE_USER */
+	rlwimi	r11, r12, 31, 26, 26		/* (_PAGE_USER>>1)->r11 */
+	and	r11, r12, r11			/* HWEXEC & USER */
+	rlwimi	r10, r11, 0, 26, 26		/* UX = HWEXEC & USER */
+
+	rlwimi	r12, r10, 0, 26, 31		/* Insert static perms */
+	rlwinm	r12, r12, 0, 20, 15		/* Clear U0-U3 */
+	tlbwe	r12, r13, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
+
+	/* Done...restore registers and get out of here.
+	*/
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	rfi					/* Force context change */
+
+/*
+ * Global functions
+ */
+
+/*
+ * extern void giveup_altivec(struct task_struct *prev)
+ *
+ * The 44x core does not have an AltiVec unit.
+ */
+_GLOBAL(giveup_altivec)
+	blr
+
+/*
+ * extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The 44x core does not have an FPU.
+ */
+#ifndef CONFIG_PPC_FPU
+_GLOBAL(giveup_fpu)
+	blr
+#endif
+
+/*
+ * extern void abort(void)
+ *
+ * At present, this routine just applies a system reset.
+ */
+_GLOBAL(abort)
+        mfspr   r13,SPRN_DBCR0
+        oris    r13,r13,DBCR0_RST_SYSTEM@h
+        mtspr   SPRN_DBCR0,r13
+
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is the second parameter.
+	 */
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r4, 0x4(r5)
+#endif
+	mtspr	SPRN_PID,r3
+	isync			/* Force context change */
+	blr
+
+/*
+ * We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+	.data
+_GLOBAL(sdata)
+_GLOBAL(empty_zero_page)
+	.space	4096
+
+/*
+ * To support >32-bit physical addresses, we use an 8KB pgdir.
+ */
+_GLOBAL(swapper_pg_dir)
+	.space	8192
+
+/* Reserve 4k for the critical exception stack and 4k for the machine
+ * check stack per CPU, for kernel-mode exceptions */
+	.section .bss
+        .align 12
+exception_stack_bottom:
+	.space	BOOKE_EXCEPTION_STACK_SIZE
+_GLOBAL(exception_stack_top)
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+_GLOBAL(cmd_line)
+	.space	512
+
+/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+	.space	8
+
+
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644
index 0000000..8562b80
--- /dev/null
+++ b/arch/powerpc/kernel/head_4xx.S
@@ -0,0 +1,1016 @@
+/*
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handlers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *	PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ * 	Author: MontaVista Software, Inc.
+ *         	frank_rowand@mvista.com or source@mvista.com
+ * 	   	debbie_chu@mvista.com
+ *
+ *
+ *    Module name: head_4xx.S
+ *
+ *    Description:
+ *      Kernel execution entry point code.
+ *
+ *    This program is free software; you can redistribute it and/or
+ *    modify it under the terms of the GNU General Public License
+ *    as published by the Free Software Foundation; either version
+ *    2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/ibm4xx.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=96m")
+ *   r7 - End of kernel command line string
+ *
+ * This is all going to change RSN when we add bi_recs.......  -- Dan
+ */
+	.text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+
+	/* Save parameters we are passed.
+	*/
+	mr	r31,r3
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+
+	/* We have to turn on the MMU right away so we get cache modes
+	 * set correctly.
+	 */
+	bl	initial_mmu
+
+/* We now have the lower 16 Meg mapped into TLB entries, and the caches
+ * ready to work.
+ */
+turn_on_mmu:
+	lis	r0,MSR_KERNEL@h
+	ori	r0,r0,MSR_KERNEL@l
+	mtspr	SPRN_SRR1,r0
+	lis	r0,start_here@h
+	ori	r0,r0,start_here@l
+	mtspr	SPRN_SRR0,r0
+	SYNC
+	rfi				/* enables MMU */
+	b	.			/* prevent prefetch past rfi */
+
+/*
+ * This area is used for temporarily saving registers during the
+ * critical exception prolog.
+ */
+	. = 0xc0
+crit_save:
+_GLOBAL(crit_r10)
+	.space	4
+_GLOBAL(crit_r11)
+	.space	4
+
+/*
+ * Exception vector entry code. This code runs with address translation
+ * turned off (i.e. using physical addresses). We assume SPRG3 has the
+ * physical address of the current task thread_struct.
+ * Note that we have to have decremented r1 before we write to any fields
+ * of the exception frame, since a critical interrupt could occur at any
+ * time, and it will write to the area immediately below the current r1.
+ */
+#define NORMAL_EXCEPTION_PROLOG						     \
+	mtspr	SPRN_SPRG0,r10;		/* save two registers to work with */\
+	mtspr	SPRN_SPRG1,r11;						     \
+	mtspr	SPRN_SPRG2,r1;						     \
+	mfcr	r10;			/* save CR in r10 for now	   */\
+	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel    */\
+	andi.	r11,r11,MSR_PR;						     \
+	beq	1f;							     \
+	mfspr	r1,SPRN_SPRG3;		/* if from user, start at top of   */\
+	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
+	addi	r1,r1,THREAD_SIZE;					     \
+1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
+	tophys(r11,r1);							     \
+	stw	r10,_CCR(r11);          /* save various registers	   */\
+	stw	r12,GPR12(r11);						     \
+	stw	r9,GPR9(r11);						     \
+	mfspr	r10,SPRN_SPRG0;						     \
+	stw	r10,GPR10(r11);						     \
+	mfspr	r12,SPRN_SPRG1;						     \
+	stw	r12,GPR11(r11);						     \
+	mflr	r10;							     \
+	stw	r10,_LINK(r11);						     \
+	mfspr	r10,SPRN_SPRG2;						     \
+	mfspr	r12,SPRN_SRR0;						     \
+	stw	r10,GPR1(r11);						     \
+	mfspr	r9,SPRN_SRR1;						     \
+	stw	r10,0(r11);						     \
+	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
+	stw	r0,GPR0(r11);						     \
+	SAVE_4GPRS(3, r11);						     \
+	SAVE_2GPRS(7, r11)
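+	/*
+	 * State on exit from NORMAL_EXCEPTION_PROLOG (mirroring the
+	 * summary given for the critical prolog below):
+	 * r9 holds SRR1 & ~MSR_WE, r12 holds SRR0,
+	 * r11 is the phys exception frame pointer,
+	 * r1 is the virt kernel stack pointer into that frame,
+	 * CR, LR, r0, r3-r12 and the old r1 are saved in the frame.
+	 */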
+
+/*
+ * Exception prolog for critical exceptions.  This is a little different
+ * from the normal exception prolog above since a critical exception
+ * can potentially occur at any point during normal exception processing.
+ * Thus we cannot use the same SPRG registers as the normal prolog above.
+ * Instead we use a couple of words of memory at low physical addresses.
+ * This is OK since we don't support SMP on these processors.
+ */
+#define CRITICAL_EXCEPTION_PROLOG					     \
+	stw	r10,crit_r10@l(0);	/* save two registers to work with */\
+	stw	r11,crit_r11@l(0);					     \
+	mfcr	r10;			/* save CR in r10 for now	   */\
+	mfspr	r11,SPRN_SRR3;		/* check whether user or kernel    */\
+	andi.	r11,r11,MSR_PR;						     \
+	lis	r11,critical_stack_top@h;				     \
+	ori	r11,r11,critical_stack_top@l;				     \
+	beq	1f;							     \
+	/* COMING FROM USER MODE */					     \
+	mfspr	r11,SPRN_SPRG3;		/* if from user, start at top of   */\
+	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+	addi	r11,r11,THREAD_SIZE;					     \
+1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
+	tophys(r11,r11);						     \
+	stw	r10,_CCR(r11);          /* save various registers	   */\
+	stw	r12,GPR12(r11);						     \
+	stw	r9,GPR9(r11);						     \
+	mflr	r10;							     \
+	stw	r10,_LINK(r11);						     \
+	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
+	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
+	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
+	stw	r9,_ESR(r11);		/* exception was taken		   */\
+	mfspr	r12,SPRN_SRR2;						     \
+	stw	r1,GPR1(r11);						     \
+	mfspr	r9,SPRN_SRR3;						     \
+	stw	r1,0(r11);						     \
+	tovirt(r1,r11);							     \
+	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
+	stw	r0,GPR0(r11);						     \
+	SAVE_4GPRS(3, r11);						     \
+	SAVE_2GPRS(7, r11)
+
+	/*
+	 * State at this point:
+	 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
+	 * r10 saved in crit_r10 and in stack frame, trashed
+	 * r11 saved in crit_r11 and in stack frame,
+	 *	now phys stack/exception frame pointer
+	 * r12 saved in stack frame, now saved SRR2
+	 * CR saved in stack frame, CR0.EQ = !SRR3.PR
+	 * LR, DEAR, ESR in stack frame
+	 * r1 saved in stack frame, now virt stack/excframe pointer
+	 * r0, r3-r8 saved in stack frame
+	 */
+
+/*
+ * Exception vectors.
+ */
+#define	START_EXCEPTION(n, label)					     \
+	. = n;								     \
+label:
+
+#define EXCEPTION(n, label, hdlr, xfer)				\
+	START_EXCEPTION(n, label);				\
+	NORMAL_EXCEPTION_PROLOG;				\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+	xfer(n, hdlr)
+
+#define CRITICAL_EXCEPTION(n, label, hdlr)			\
+	START_EXCEPTION(n, label);				\
+	CRITICAL_EXCEPTION_PROLOG;				\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+			  NOCOPY, crit_transfer_to_handler,	\
+			  ret_from_crit_exc)
+
+#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)	\
+	li	r10,trap;					\
+	stw	r10,TRAP(r11);					\
+	lis	r10,msr@h;					\
+	ori	r10,r10,msr@l;					\
+	copyee(r10, r9);					\
+	bl	tfer;		 				\
+	.long	hdlr;						\
+	.long	ret
+
+#define COPY_EE(d, s)		rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)		\
+	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
+			  ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
+			  ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
+			  ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)	\
+	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
+			  ret_from_except)
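+/* Note on the trap numbers passed above: these macros record how the
+ * frame was built in the low bits of the value stored at TRAP(r11).
+ * Informally (as used here, not an architected encoding): the "lite"
+ * transfers use n+1, meaning the non-volatile GPRs have not been
+ * saved, and the critical path uses n+2 for an SRR2/SRR3-style
+ * exception.
+ */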
+
+
+/*
+ * 0x0100 - Critical Interrupt Exception
+ */
+	CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)
+
+/*
+ * 0x0200 - Machine Check Exception
+ */
+	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+
+/*
+ * 0x0300 - Data Storage Exception
+ * This happens for just a few reasons.  U0 set (but we don't do that),
+ * or zone protection fault (user violation, write to protected page).
+ * If this is just an update of modified status, we do that quickly
+ * and exit.  Otherwise, we call heavyweight functions to do the work.
+ */
+	START_EXCEPTION(0x0300,	DataStorage)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+	stw     r12, 0(r0)
+	stw     r9, 4(r0)
+	mfcr    r11
+	mfspr   r12, SPRN_PID
+	stw     r11, 8(r0)
+	stw     r12, 12(r0)
+#else
+	mtspr	SPRN_SPRG4, r12
+	mtspr	SPRN_SPRG5, r9
+	mfcr	r11
+	mfspr	r12, SPRN_PID
+	mtspr	SPRN_SPRG7, r11
+	mtspr	SPRN_SPRG6, r12
+#endif
+
+	/* First, check if it was a zone fault (which means a user
+	 * tried to access a kernel or read-protected page - always
+	 * a SEGV).  All other faults here must be stores, so no
+	 * need to check ESR_DST as well. */
+	mfspr	r10, SPRN_ESR
+	andis.	r10, r10, ESR_DIZ@h
+	bne	2f
+
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	li	r9, 0
+	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
+	b	4f
+
+	/* Get the PGD for the current thread.
+	 */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+4:
+	tophys(r11, r11)
+	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
+	lwz	r11, 0(r11)		/* Get L1 entry */
+	rlwinm.	r12, r11, 0, 0, 19	/* Extract L2 (pte) base address */
+	beq	2f			/* Bail if no table */
+
+	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
+	lwz	r11, 0(r12)		/* Get Linux PTE */
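+	/* The two rlwimi address computations above are, in C terms,
+	 * roughly the following sketch of the 4xx two-level walk:
+	 *
+	 *	pmd = pgdir + (((ea >> 22) & 0x3ff) << 2);
+	 *	pte = pte_base + (((ea >> 12) & 0x3ff) << 2);
+	 */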
+
+	andi.	r9, r11, _PAGE_RW	/* Is it writeable? */
+	beq	2f			/* Bail if not */
+
+	/* Update 'changed'.
+	*/
+	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+	stw	r11, 0(r12)		/* Update Linux page table */
+
+	/* Most of the Linux PTE is ready to load into the TLB LO.
+	 * We set ZSEL, where only the LS-bit determines user access.
+	 * We set execute, because we don't have the granularity to
+	 * properly set this at the page level (Linux problem).
+	 * If shared is set, we cause a zero PID->TID load.
+	 * Many of these bits are software only.  Bits we don't set
+	 * here we assume (as we properly should) already have the
+	 * appropriate value.
+	 */
+	li	r12, 0x0ce2
+	andc	r11, r11, r12		/* Make sure 20, 21 are zero */
+
+	/* find the TLB index that caused the fault.  It has to be here.
+	*/
+	tlbsx	r9, 0, r10
+
+	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
+
+	/* Done...restore registers and get out of here.
+	*/
+#ifdef CONFIG_403GCX
+	lwz     r12, 12(r0)
+	lwz     r11, 8(r0)
+	mtspr   SPRN_PID, r12
+	mtcr    r11
+	lwz     r9, 4(r0)
+	lwz     r12, 0(r0)
+#else
+	mfspr	r12, SPRN_SPRG6
+	mfspr	r11, SPRN_SPRG7
+	mtspr	SPRN_PID, r12
+	mtcr	r11
+	mfspr	r9, SPRN_SPRG5
+	mfspr	r12, SPRN_SPRG4
+#endif
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	PPC405_ERR77_SYNC
+	rfi			/* Should sync shadow TLBs */
+	b	.		/* prevent prefetch past rfi */
+
+2:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+#ifdef CONFIG_403GCX
+	lwz     r12, 12(r0)
+	lwz     r11, 8(r0)
+	mtspr   SPRN_PID, r12
+	mtcr    r11
+	lwz     r9, 4(r0)
+	lwz     r12, 0(r0)
+#else
+	mfspr	r12, SPRN_SPRG6
+	mfspr	r11, SPRN_SPRG7
+	mtspr	SPRN_PID, r12
+	mtcr	r11
+	mfspr	r9, SPRN_SPRG5
+	mfspr	r12, SPRN_SPRG4
+#endif
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	DataAccess
+
+/*
+ * 0x0400 - Instruction Storage Exception
+ * This is caused by a fetch from non-execute or guarded pages.
+ */
+	START_EXCEPTION(0x0400, InstructionAccess)
+	NORMAL_EXCEPTION_PROLOG
+	mr	r4,r12			/* Pass SRR0 as arg2 */
+	li	r5,0			/* Pass zero as arg3 */
+	EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+/* 0x0500 - External Interrupt Exception */
+	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* 0x0600 - Alignment Exception */
+	START_EXCEPTION(0x0600, Alignment)
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
+	stw	r4,_DEAR(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0x600, AlignmentException)
+
+/* 0x0700 - Program Exception */
+	START_EXCEPTION(0x0700, ProgramCheck)
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
+	stw	r4,_ESR(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_STD(0x700, ProgramCheckException)
+
+	EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE)
+
+/* 0x0C00 - System Call Exception */
+	START_EXCEPTION(0x0C00,	SystemCall)
+	NORMAL_EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+	EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE)
+
+/* 0x1000 - Programmable Interval Timer (PIT) Exception */
+	START_EXCEPTION(0x1000, Decrementer)
+	NORMAL_EXCEPTION_PROLOG
+	lis	r0,TSR_PIS@h
+	mtspr	SPRN_TSR,r0		/* Clear the PIT exception */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_LITE(0x1000, timer_interrupt)
+
+#if 0
+/* NOTE:
+ * FIT and WDT handlers are not implemented yet.
+ */
+
+/* 0x1010 - Fixed Interval Timer (FIT) Exception
+*/
+	STND_EXCEPTION(0x1010,	FITException,		UnknownException)
+
+/* 0x1020 - Watchdog Timer (WDT) Exception
+*/
+#ifdef CONFIG_BOOKE_WDT
+	CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
+#else
+	CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException)
+#endif
+#endif
+
+/* 0x1100 - Data TLB Miss Exception
+ * As the name implies, translation is not in the MMU, so search the
+ * page tables and fix it.  The only purpose of this function is to
+ * load TLB entries from the page table if they exist.
+ */
+	START_EXCEPTION(0x1100,	DTLBMiss)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+	stw     r12, 0(r0)
+	stw     r9, 4(r0)
+	mfcr    r11
+	mfspr   r12, SPRN_PID
+	stw     r11, 8(r0)
+	stw     r12, 12(r0)
+#else
+	mtspr	SPRN_SPRG4, r12
+	mtspr	SPRN_SPRG5, r9
+	mfcr	r11
+	mfspr	r12, SPRN_PID
+	mtspr	SPRN_SPRG7, r11
+	mtspr	SPRN_SPRG6, r12
+#endif
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	li	r9, 0
+	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
+	b	4f
+
+	/* Get the PGD for the current thread.
+	 */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+4:
+	tophys(r11, r11)
+	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
+	lwz	r12, 0(r11)		/* Get L1 entry */
+	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
+	beq	2f			/* Bail if no table */
+
+	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
+	lwz	r11, 0(r12)		/* Get Linux PTE */
+	andi.	r9, r11, _PAGE_PRESENT
+	beq	5f
+
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, 0(r12)
+
+	/* Create TLB tag.  This is the faulting address plus a static
+	 * set of bits.  These are size, valid, E, U0.
+	*/
+	li	r12, 0x00c0
+	rlwimi	r10, r12, 0, 20, 31
+
+	b	finish_tlb_load
+
+2:	/* Check for possible large-page pmd entry */
+	rlwinm.	r9, r12, 2, 22, 24
+	beq	5f
+
+	/* Create TLB tag.  This is the faulting address, plus a static
+	 * set of bits (valid, E, U0) plus the size from the PMD.
+	 */
+	ori	r9, r9, 0x40
+	rlwimi	r10, r9, 0, 20, 31
+	mr	r11, r12
+
+	b	finish_tlb_load
+
+5:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+#ifdef CONFIG_403GCX
+	lwz     r12, 12(r0)
+	lwz     r11, 8(r0)
+	mtspr   SPRN_PID, r12
+	mtcr    r11
+	lwz     r9, 4(r0)
+	lwz     r12, 0(r0)
+#else
+	mfspr	r12, SPRN_SPRG6
+	mfspr	r11, SPRN_SPRG7
+	mtspr	SPRN_PID, r12
+	mtcr	r11
+	mfspr	r9, SPRN_SPRG5
+	mfspr	r12, SPRN_SPRG4
+#endif
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	DataAccess
+
+/* 0x1200 - Instruction TLB Miss Exception
+ * Nearly the same as above, except we get our information from different
+ * registers and bailout to a different point.
+ */
+	START_EXCEPTION(0x1200,	ITLBMiss)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+#ifdef CONFIG_403GCX
+	stw     r12, 0(r0)
+	stw     r9, 4(r0)
+	mfcr    r11
+	mfspr   r12, SPRN_PID
+	stw     r11, 8(r0)
+	stw     r12, 12(r0)
+#else
+	mtspr	SPRN_SPRG4, r12
+	mtspr	SPRN_SPRG5, r9
+	mfcr	r11
+	mfspr	r12, SPRN_PID
+	mtspr	SPRN_SPRG7, r11
+	mtspr	SPRN_SPRG6, r12
+#endif
+	mfspr	r10, SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	li	r9, 0
+	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
+	b	4f
+
+	/* Get the PGD for the current thread.
+	 */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+4:
+	tophys(r11, r11)
+	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
+	lwz	r12, 0(r11)		/* Get L1 entry */
+	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
+	beq	2f			/* Bail if no table */
+
+	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
+	lwz	r11, 0(r12)		/* Get Linux PTE */
+	andi.	r9, r11, _PAGE_PRESENT
+	beq	5f
+
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, 0(r12)
+
+	/* Create TLB tag.  This is the faulting address plus a static
+	 * set of bits.  These are size, valid, E, U0.
+	*/
+	li	r12, 0x00c0
+	rlwimi	r10, r12, 0, 20, 31
+
+	b	finish_tlb_load
+
+2:	/* Check for possible large-page pmd entry */
+	rlwinm.	r9, r12, 2, 22, 24
+	beq	5f
+
+	/* Create TLB tag.  This is the faulting address, plus a static
+	 * set of bits (valid, E, U0) plus the size from the PMD.
+	 */
+	ori	r9, r9, 0x40
+	rlwimi	r10, r9, 0, 20, 31
+	mr	r11, r12
+
+	b	finish_tlb_load
+
+5:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+#ifdef CONFIG_403GCX
+	lwz     r12, 12(r0)
+	lwz     r11, 8(r0)
+	mtspr   SPRN_PID, r12
+	mtcr    r11
+	lwz     r9, 4(r0)
+	lwz     r12, 0(r0)
+#else
+	mfspr	r12, SPRN_SPRG6
+	mfspr	r11, SPRN_SPRG7
+	mtspr	SPRN_PID, r12
+	mtcr	r11
+	mfspr	r9, SPRN_SPRG5
+	mfspr	r12, SPRN_SPRG4
+#endif
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	InstructionAccess
+
+	EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+#ifdef CONFIG_IBM405_ERR51
+	/* 405GP errata 51 */
+	START_EXCEPTION(0x1700, Trap_17)
+	b DTLBMiss
+#else
+	EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
+#endif
+	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE)
+
+/* Check for a single step debug exception while in an exception
+ * handler before state has been saved.  This is to catch the case
+ * where an instruction that we are trying to single step causes
+ * an exception (eg ITLB/DTLB miss) and thus the first instruction of
+ * the exception handler generates a single step debug exception.
+ *
+ * If we get a debug trap on the first instruction of an exception handler,
+ * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
+ * a critical exception, so we are using SPRN_SRR3 to manipulate the MSR).
+ * The exception handler was handling a non-critical interrupt, so it will
+ * save (and later restore) the MSR via SPRN_SRR1, which will still have
+ * the MSR_DE bit set.
+ */
+	/* 0x2000 - Debug Exception */
+	START_EXCEPTION(0x2000, DebugTrap)
+	CRITICAL_EXCEPTION_PROLOG
+
+	/*
+	 * If this is a single step or branch-taken exception in an
+	 * exception entry sequence, it was probably meant to apply to
+	 * the code where the exception occurred (since exception entry
+	 * doesn't turn off DE automatically).  We simulate the effect
+	 * of turning off DE on entry to an exception handler by turning
+	 * off DE in the SRR3 value and clearing the debug status.
+	 */
+	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
+	andis.	r10,r10,DBSR_IC@h
+	beq+	2f
+
+	andi.	r10,r9,MSR_IR|MSR_PR	/* check supervisor + MMU off */
+	beq	1f			/* branch and fix it up */
+
+	mfspr   r10,SPRN_SRR2		/* Faulting instruction address */
+	cmplwi  r10,0x2100
+	bgt+    2f			/* address above exception vectors */
+
+	/* here it looks like we got an inappropriate debug exception. */
+1:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the SRR3 value */
+	lis	r10,DBSR_IC@h		/* clear the IC event */
+	mtspr	SPRN_DBSR,r10
+	/* restore state and get out */
+	lwz	r10,_CCR(r11)
+	lwz	r0,GPR0(r11)
+	lwz	r1,GPR1(r11)
+	mtcrf	0x80,r10
+	mtspr	SPRN_SRR2,r12
+	mtspr	SPRN_SRR3,r9
+	lwz	r9,GPR9(r11)
+	lwz	r12,GPR12(r11)
+	lwz	r10,crit_r10@l(0)
+	lwz	r11,crit_r11@l(0)
+	PPC405_ERR77_SYNC
+	rfci
+	b	.
+
+	/* continue normal handling for a critical exception... */
+2:	mfspr	r4,SPRN_DBSR
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
+		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
+		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+
+/*
+ * The other Data TLB exceptions bail out to this point
+ * if they can't resolve the lightweight TLB fault.
+ */
+DataAccess:
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+	stw	r5,_ESR(r11)
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+/* Other PowerPC processors, namely those derived from the 6xx-series
+ * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
+ * However, for the 4xx-series processors these are neither defined nor
+ * reserved.
+ */
+
+	/* Damn, I came up one instruction too many to fit into the
+	 * exception space :-).  Both the instruction and data TLB
+	 * miss get to this point to load the TLB.
+	 * 	r10 - TLB_TAG value
+	 * 	r11 - Linux PTE
+	 *	r12, r9 - available to use
+	 *	PID - loaded with proper value when we get here
+	 *	Upon exit, we reload everything and RFI.
+	 * Actually, it will fit now, but oh well.....a common place
+	 * to load the TLB.
+	 */
+tlb_4xx_index:
+	.long	0
+finish_tlb_load:
+	/* load the next available TLB index.
+	*/
+	lwz	r9, tlb_4xx_index@l(0)
+	addi	r9, r9, 1
+	andi.	r9, r9, (PPC4XX_TLB_SIZE-1)
+	stw	r9, tlb_4xx_index@l(0)
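+	/* Equivalent C for the round-robin above:
+	 *	tlb_4xx_index = (tlb_4xx_index + 1) & (PPC4XX_TLB_SIZE - 1);
+	 */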
+
+6:
+	/*
+	 * Clear out the software-only bits in the PTE to generate the
+	 * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
+	 * top 3 bits of the zone field, and M.
+	 */
+	li	r12, 0x0ce2
+	andc	r11, r11, r12
+
+	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
+	tlbwe	r10, r9, TLB_TAG		/* Load TLB HI */
+
+	/* Done...restore registers and get out of here.
+	*/
+#ifdef CONFIG_403GCX
+	lwz     r12, 12(r0)
+	lwz     r11, 8(r0)
+	mtspr   SPRN_PID, r12
+	mtcr    r11
+	lwz     r9, 4(r0)
+	lwz     r12, 0(r0)
+#else
+	mfspr	r12, SPRN_SPRG6
+	mfspr	r11, SPRN_SPRG7
+	mtspr	SPRN_PID, r12
+	mtcr	r11
+	mfspr	r9, SPRN_SPRG5
+	mfspr	r12, SPRN_SPRG4
+#endif
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	PPC405_ERR77_SYNC
+	rfi			/* Should sync shadow TLBs */
+	b	.		/* prevent prefetch past rfi */
+
+/* extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The PowerPC 4xx family of processors do not have an FPU, so this just
+ * returns.
+ */
+_GLOBAL(giveup_fpu)
+	blr
+
+/* This is where the main kernel code starts.
+ */
+start_here:
+
+	/* ptr to current */
+	lis	r2,init_task@h
+	ori	r2,r2,init_task@l
+
+	/* ptr to phys current thread */
+	tophys(r4,r2)
+	addi	r4,r4,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	/* stack */
+	lis	r1,init_thread_union@ha
+	addi	r1,r1,init_thread_union@l
+	li	r0,0
+	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+	bl	early_init	/* We have to do this with MMU on */
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+	bl	machine_init
+	bl	MMU_init
+
+/* Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the 4xx, all we have to do is invalidate the TLB to clear
+ * the old 16M byte TLB mappings.
+ */
+	lis	r4,2f@h
+	ori	r4,r4,2f@l
+	tophys(r4,r4)
+	lis	r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
+	ori	r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	rfi
+	b	.		/* prevent prefetch past rfi */
+
+/* Load up the kernel context */
+2:
+	sync			/* Flush to memory before changing TLB */
+	tlbia
+	isync			/* Flush shadow TLBs */
+
+	/* set up the PTE pointers for the Abatron bdiGDB.
+	*/
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
+	tophys(r5,r5)
+	stw	r6, 0(r5)
+
+/* Now turn on the MMU for real! */
+	lis	r4,MSR_KERNEL@h
+	ori	r4,r4,MSR_KERNEL@l
+	lis	r3,start_kernel@h
+	ori	r3,r3,start_kernel@l
+	mtspr	SPRN_SRR0,r3
+	mtspr	SPRN_SRR1,r4
+	rfi			/* enable MMU and jump to start_kernel */
+	b	.		/* prevent prefetch past rfi */
+
+/* Set up the initial MMU state so we can do the first level of
+ * kernel initialization.  This maps the first 16 MBytes of memory 1:1
+ * virtual to physical and more importantly sets the cache mode.
+ */
+initial_mmu:
+	tlbia			/* Invalidate all TLB entries */
+	isync
+
+	/* We should still be executing code at physical address 0x0000xxxx
+	 * at this point. However, start_here is at virtual address
+	 * 0xC000xxxx. So, set up a TLB mapping to cover this once
+	 * translation is enabled.
+	 */
+
+	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
+	ori	r3,r3,KERNELBASE@l
+	tophys(r4,r3)			/* Load the kernel physical address */
+
+	iccci	r0,r3			/* Invalidate the i-cache before use */
+
+	/* Load the kernel PID.
+	*/
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Configure and load one entry into TLB slot 63.  In case we
+	 * are pinning TLBs, this slot is reserved by the other TLB
+	 * functions.  If not reserving, then it doesn't matter where
+	 * it is loaded.
+	 */
+	clrrwi	r4,r4,10		/* Mask off the real page number */
+	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
+
+	clrrwi	r3,r3,10		/* Mask off the effective page number */
+	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
+
+	li	r0,63			/* TLB slot 63 */
+
+	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
+	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */
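+	/* In C terms the pinned entry built above is roughly:
+	 *
+	 *	data = (phys & ~0x3ff) | TLB_WR | TLB_EX;
+	 *	tag  = (virt & ~0x3ff) | TLB_VALID | TLB_PAGESZ(PAGESZ_16M);
+	 *
+	 * (clrrwi ...,10 clears the low 10 bits of each address.)
+	 */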
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
+
+	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
+	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */
+
+	lis	r3,SERIAL_DEBUG_IO_BASE@h
+	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
+	mr	r4,r3
+	clrrwi	r4,r4,12
+	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
+
+	clrrwi	r3,r3,12
+	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+	li	r0,0			/* TLB slot 0 */
+	tlbwe	r4,r0,TLB_DATA
+	tlbwe	r3,r0,TLB_TAG
+#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
+
+	isync
+
+	/* Establish the exception vector base
+	*/
+	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
+	tophys(r0,r4)			/* Use the physical address */
+	mtspr	SPRN_EVPR,r0
+
+	blr
+
+_GLOBAL(abort)
+        mfspr   r13,SPRN_DBCR0
+        oris    r13,r13,DBCR0_RST_SYSTEM@h
+        mtspr   SPRN_DBCR0,r13
+
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is the second parameter.
+	 */
+	lis	r5, KERNELBASE@h
+	lwz	r5, 0xf0(r5)
+	stw	r4, 0x4(r5)
+#endif
+	sync
+	mtspr	SPRN_PID,r3
+	isync				/* Need an isync to flush shadow */
+					/* TLBs after changing PID */
+	blr
+
+/* We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+	.data
+_GLOBAL(sdata)
+_GLOBAL(empty_zero_page)
+	.space	4096
+_GLOBAL(swapper_pg_dir)
+	.space	4096
+
+
+/* Stack for handling critical exceptions from kernel mode */
+	.section .bss
+        .align 12
+exception_stack_bottom:
+	.space	4096
+critical_stack_top:
+_GLOBAL(exception_stack_top)
+
+/* This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+_GLOBAL(cmd_line)
+	.space	512
+
+/* Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+	.space	8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 0000000..22a5ee0
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,2011 @@
+/*
+ *  arch/ppc64/kernel/head.S
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
+ *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
+ *
+ *  This file contains the low-level support and setup for the
+ *  PowerPC-64 platform, including trap and interrupt dispatch.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/systemcfg.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#include <asm/cputable.h>
+#include <asm/setup.h>
+#include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE
+#endif
+
+/*
+ * We layout physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 -        : Early init and support code
+ */
+
+/*
+ *   SPRG Usage
+ *
+ *   Register	Definition
+ *
+ *   SPRG0	reserved for hypervisor
+ *   SPRG1	temp - used to save gpr
+ *   SPRG2	temp - used to save gpr
+ *   SPRG3	virt addr of paca
+ */
+
+/*
+ * Entering into this code we make the following assumptions:
+ *  For pSeries:
+ *   1. The MMU is off & open firmware is running in real mode.
+ *   2. The kernel is entered at __start
+ *
+ *  For iSeries:
+ *   1. The MMU is on (as it always is for iSeries)
+ *   2. The kernel is entered at system_reset_iSeries
+ */
+
+	.text
+	.globl  _stext
+_stext:
+#ifdef CONFIG_PPC_MULTIPLATFORM
+_GLOBAL(__start)
+	/* NOP this out unconditionally */
+BEGIN_FTR_SECTION
+	b .__start_initialization_multiplatform
+END_FTR_SECTION(0, 1)
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+	/* Catch branch to 0 in real mode */
+	trap
+
+#ifdef CONFIG_PPC_ISERIES
+	/*
+	 * At offset 0x20, there is a pointer to iSeries LPAR data.
+	 * This is required by the hypervisor
+	 */
+	. = 0x20
+	.llong hvReleaseData-KERNELBASE
+
+	/*
+	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
+	 * array (used by the iSeries LPAR debugger to do translation
+	 * between physical addresses and absolute addresses) and
+	 * to the pidhash table (also used by the debugger)
+	 */
+	.llong mschunks_map-KERNELBASE
+	.llong 0	/* pidhash-KERNELBASE SFRXXX */
+
+	/* Offset 0x38 - Pointer to start of embedded System.map */
+	.globl	embedded_sysmap_start
+embedded_sysmap_start:
+	.llong	0
+	/* Offset 0x40 - Pointer to end of embedded System.map */
+	.globl	embedded_sysmap_end
+embedded_sysmap_end:
+	.llong	0
+
+#endif /* CONFIG_PPC_ISERIES */
+
+	/* Secondary processors spin on this value until it goes to 1. */
+	.globl  __secondary_hold_spinloop
+__secondary_hold_spinloop:
+	.llong	0x0
+
+	/* Secondary processors write this value with their cpu # */
+	/* after they enter the spin loop immediately below.	  */
+	.globl	__secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+	.llong	0x0
+
+	. = 0x60
+/*
+ * The following code is used on pSeries to hold secondary processors
+ * in a spin loop after they have been freed from OpenFirmware, but
+ * before the bulk of the kernel has been relocated.  This code
+ * is relocated to physical address 0x60 before prom_init is run.
+ * All of it must fit below the first exception vector at 0x100.
+ */
+_GLOBAL(__secondary_hold)
+	mfmsr	r24
+	ori	r24,r24,MSR_RI
+	mtmsrd	r24			/* RI on */
+
+	/* Grab our linux cpu number */
+	mr	r24,r3
+
+	/* Tell the master cpu we're here */
+	/* Relocation is off & we are located at an address less */
+	/* than 0x100, so only need to grab low order offset.    */
+	std	r24,__secondary_hold_acknowledge@l(0)
+	sync
+
+	/* All secondary cpus wait here until told to start. */
+100:	ld	r4,__secondary_hold_spinloop@l(0)
+	cmpdi	0,r4,1
+	bne	100b
+
+#ifdef CONFIG_HMT
+	b	.hmt_init
+#else
+#ifdef CONFIG_SMP
+	mr	r3,r24
+	b	.pSeries_secondary_smp_init
+#else
+	BUG_OPCODE
+#endif
+#endif
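+/* The hold protocol above, in C terms (a sketch of the two spin
+ * variables; the master releases the secondaries by writing 1 to
+ * __secondary_hold_spinloop once the kernel has been relocated):
+ *
+ *	__secondary_hold_acknowledge = cpu;
+ *	while (__secondary_hold_spinloop != 1)
+ *		;
+ */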
+
+/* This value is used to mark exception frames on the stack. */
+	.section ".toc","aw"
+exception_marker:
+	.tc	ID_72656773_68657265[TC],0x7265677368657265
+	.text
+
+/*
+ * The following macros define the code that appears as
+ * the prologue to each of the exception handlers.  They
+ * are split into two parts to allow a single kernel binary
+ * to be used for pSeries and iSeries.
+ * LOL.  One day... - paulus
+ */
+
+/*
+ * We make as much of the exception code common between native
+ * exception handlers (including pSeries LPAR) and iSeries LPAR
+ * implementations as possible.
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ */
+#define EX_R9		0
+#define EX_R10		8
+#define EX_R11		16
+#define EX_R12		24
+#define EX_R13		32
+#define EX_SRR0		40
+#define EX_R3		40	/* SLB miss saves R3, but not SRR0 */
+#define EX_DAR		48
+#define EX_LR		48	/* SLB miss saves LR, but not DAR */
+#define EX_DSISR	56
+#define EX_CCR		60
+
+#define EXCEPTION_PROLOG_PSERIES(area, label)				\
+	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
+	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
+	std	r10,area+EX_R10(r13);					\
+	std	r11,area+EX_R11(r13);					\
+	std	r12,area+EX_R12(r13);					\
+	mfspr	r9,SPRG1;						\
+	std	r9,area+EX_R13(r13);					\
+	mfcr	r9;							\
+	clrrdi	r12,r13,32;		/* get high part of &label */	\
+	mfmsr	r10;							\
+	mfspr	r11,SRR0;		/* save SRR0 */			\
+	ori	r12,r12,(label)@l;	/* virt addr of handler */	\
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
+	mtspr	SRR0,r12;						\
+	mfspr	r12,SRR1;		/* and SRR1 */			\
+	mtspr	SRR1,r10;						\
+	rfid;								\
+	b	.	/* prevent speculative execution */
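+/*
+ * Note that EXCEPTION_PROLOG_PSERIES ends with an rfid rather than a
+ * branch: SRR0 is loaded with the virtual address of the common
+ * handler and SRR1 with MSR|IR|DR|RI, so the rfid both turns
+ * relocation on and transfers control in a single operation.
+ */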
+
+/*
+ * This is the start of the interrupt handlers for iSeries
+ * This code runs with relocation on.
+ */
+#define EXCEPTION_PROLOG_ISERIES_1(area)				\
+	mfspr	r13,SPRG3;		/* get paca address into r13 */	\
+	std	r9,area+EX_R9(r13);	/* save r9 - r12 */		\
+	std	r10,area+EX_R10(r13);					\
+	std	r11,area+EX_R11(r13);					\
+	std	r12,area+EX_R12(r13);					\
+	mfspr	r9,SPRG1;						\
+	std	r9,area+EX_R13(r13);					\
+	mfcr	r9
+
+#define EXCEPTION_PROLOG_ISERIES_2					\
+	mfmsr	r10;							\
+	ld	r11,PACALPPACA+LPPACASRR0(r13);				\
+	ld	r12,PACALPPACA+LPPACASRR1(r13);				\
+	ori	r10,r10,MSR_RI;						\
+	mtmsrd	r10,1
+
+/*
+ * The common exception prolog is used for all except a few exceptions
+ * such as a segment miss on a kernel address.  We have to be prepared
+ * to take another exception from the point where we first touch the
+ * kernel stack onwards.
+ *
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ */
+#define EXCEPTION_PROLOG_COMMON(n, area)				   \
+	andi.	r10,r12,MSR_PR;		/* See if coming from user	*/ \
+	mr	r10,r1;			/* Save r1			*/ \
+	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
+	beq-	1f;							   \
+	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
+1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
+	bge-	cr1,bad_stack;		/* abort if it is		*/ \
+	std	r9,_CCR(r1);		/* save CR in stackframe	*/ \
+	std	r11,_NIP(r1);		/* save SRR0 in stackframe	*/ \
+	std	r12,_MSR(r1);		/* save SRR1 in stackframe	*/ \
+	std	r10,0(r1);		/* make stack chain pointer	*/ \
+	std	r0,GPR0(r1);		/* save r0 in stackframe	*/ \
+	std	r10,GPR1(r1);		/* save r1 in stackframe	*/ \
+	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \
+	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \
+	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \
+	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \
+	ld	r10,area+EX_R10(r13);					   \
+	std	r9,GPR9(r1);						   \
+	std	r10,GPR10(r1);						   \
+	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe	*/ \
+	ld	r10,area+EX_R12(r13);					   \
+	ld	r11,area+EX_R13(r13);					   \
+	std	r9,GPR11(r1);						   \
+	std	r10,GPR12(r1);						   \
+	std	r11,GPR13(r1);						   \
+	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \
+	mflr	r9;			/* save LR in stackframe	*/ \
+	std	r9,_LINK(r1);						   \
+	mfctr	r10;			/* save CTR in stackframe	*/ \
+	std	r10,_CTR(r1);						   \
+	mfspr	r11,XER;		/* save XER in stackframe	*/ \
+	std	r11,_XER(r1);						   \
+	li	r9,(n)+1;						   \
+	std	r9,_TRAP(r1);		/* set trap number		*/ \
+	li	r10,0;							   \
+	ld	r11,exception_marker@toc(r2);				   \
+	std	r10,RESULT(r1);		/* clear regs->result		*/ \
+	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/
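+/*
+ * The (n)+1 stored in _TRAP above sets the low bit of the trap word,
+ * recording that the non-volatile GPRs have not been saved in this
+ * frame; .save_nvgprs is expected to save them and clear that bit
+ * before anything looks at the full register set.
+ */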
+
+/*
+ * Exception vectors.
+ */
+#define STD_EXCEPTION_PSERIES(n, label)			\
+	. = n;						\
+	.globl label##_pSeries;				\
+label##_pSeries:					\
+	HMT_MEDIUM;					\
+	mtspr	SPRG1,r13;		/* save r13 */	\
+	RUNLATCH_ON(r13);				\
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+
+#define STD_EXCEPTION_ISERIES(n, label, area)		\
+	.globl label##_iSeries;				\
+label##_iSeries:					\
+	HMT_MEDIUM;					\
+	mtspr	SPRG1,r13;		/* save r13 */	\
+	RUNLATCH_ON(r13);				\
+	EXCEPTION_PROLOG_ISERIES_1(area);		\
+	EXCEPTION_PROLOG_ISERIES_2;			\
+	b	label##_common
+
+#define MASKABLE_EXCEPTION_ISERIES(n, label)				\
+	.globl label##_iSeries;						\
+label##_iSeries:							\
+	HMT_MEDIUM;							\
+	mtspr	SPRG1,r13;		/* save r13 */			\
+	RUNLATCH_ON(r13);						\
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
+	lbz	r10,PACAPROCENABLED(r13);				\
+	cmpwi	0,r10,0;						\
+	beq-	label##_iSeries_masked;					\
+	EXCEPTION_PROLOG_ISERIES_2;					\
+	b	label##_common;						\
+
+#ifdef DO_SOFT_DISABLE
+#define DISABLE_INTS				\
+	lbz	r10,PACAPROCENABLED(r13);	\
+	li	r11,0;				\
+	std	r10,SOFTE(r1);			\
+	mfmsr	r10;				\
+	stb	r11,PACAPROCENABLED(r13);	\
+	ori	r10,r10,MSR_EE;			\
+	mtmsrd	r10,1
+
+#define ENABLE_INTS				\
+	lbz	r10,PACAPROCENABLED(r13);	\
+	mfmsr	r11;				\
+	std	r10,SOFTE(r1);			\
+	ori	r11,r11,MSR_EE;			\
+	mtmsrd	r11,1
+
+#else	/* hard enable/disable interrupts */
+#define DISABLE_INTS
+
+#define ENABLE_INTS				\
+	ld	r12,_MSR(r1);			\
+	mfmsr	r11;				\
+	rlwimi	r11,r12,0,MSR_EE;		\
+	mtmsrd	r11,1
+
+#endif
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
+	.align	7;					\
+	.globl label##_common;				\
+label##_common:						\
+	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
+	DISABLE_INTS;					\
+	bl	.save_nvgprs;				\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
+	bl	hdlr;					\
+	b	.ret_from_except
+
+#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)	\
+	.align	7;					\
+	.globl label##_common;				\
+label##_common:						\
+	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);	\
+	DISABLE_INTS;					\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
+	bl	hdlr;					\
+	b	.ret_from_except_lite
+
+/*
+ * Start of pSeries system interrupt routines
+ */
+	. = 0x100
+	.globl __start_interrupts
+__start_interrupts:
+
+	STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+	. = 0x200
+_machine_check_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13		/* save r13 */
+	RUNLATCH_ON(r13)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+	. = 0x300
+	.globl data_access_pSeries
+data_access_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13
+BEGIN_FTR_SECTION
+	mtspr	SPRG2,r12
+	mfspr	r13,DAR
+	mfspr	r12,DSISR
+	srdi	r13,r13,60
+	rlwimi	r13,r12,16,0x20
+	mfcr	r12
+	cmpwi	r13,0x2c
+	beq	.do_stab_bolted_pSeries
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+	. = 0x380
+	.globl data_access_slb_pSeries
+data_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13
+	RUNLATCH_ON(r13)
+	mfspr	r13,SPRG3		/* get paca address into r13 */
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r9,SPRG1
+	std	r9,PACA_EXSLB+EX_R13(r13)
+	mfcr	r9
+	mfspr	r12,SRR1		/* and SRR1 */
+	mfspr	r3,DAR
+	b	.do_slb_miss		/* Rel. branch works in real mode */
+
+	STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+	. = 0x480
+	.globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+	HMT_MEDIUM
+	mtspr	SPRG1,r13
+	RUNLATCH_ON(r13)
+	mfspr	r13,SPRG3		/* get paca address into r13 */
+	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	mfspr	r9,SPRG1
+	std	r9,PACA_EXSLB+EX_R13(r13)
+	mfcr	r9
+	mfspr	r12,SRR1		/* and SRR1 */
+	mfspr	r3,SRR0			/* SRR0 is faulting address */
+	b	.do_slb_miss		/* Rel. branch works in real mode */
+
+	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+	STD_EXCEPTION_PSERIES(0x600, alignment)
+	STD_EXCEPTION_PSERIES(0x700, program_check)
+	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+	STD_EXCEPTION_PSERIES(0x900, decrementer)
+	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+	. = 0xc00
+	.globl	system_call_pSeries
+system_call_pSeries:
+	HMT_MEDIUM
+	RUNLATCH_ON(r9)
+	mr	r9,r13
+	mfmsr	r10
+	mfspr	r13,SPRG3
+	mfspr	r11,SRR0
+	clrrdi	r12,r13,32
+	oris	r12,r12,system_call_common@h
+	ori	r12,r12,system_call_common@l
+	mtspr	SRR0,r12
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	mfspr	r12,SRR1
+	mtspr	SRR1,r10
+	rfid
+	b	.	/* prevent speculative execution */
+
+	STD_EXCEPTION_PSERIES(0xd00, single_step)
+	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+	/* We need to deal with the Altivec unavailable exception
+	 * here which is at 0xf20, thus in the middle of the
+	 * prolog code of the PerformanceMonitor one. A little
+	 * trickery is thus necessary
+	 */
+	. = 0xf00
+	b	performance_monitor_pSeries
+
+	STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
+
+	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+
+	. = 0x3000
+
+/*** pSeries interrupt support ***/
+
+	/* moved from 0xf00 */
+	STD_EXCEPTION_PSERIES(., performance_monitor)
+
+	.align	7
+_GLOBAL(do_stab_bolted_pSeries)
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+      .globl system_reset_fwnmi
+system_reset_fwnmi:
+      HMT_MEDIUM
+      mtspr   SPRG1,r13               /* save r13 */
+      RUNLATCH_ON(r13)
+      EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+      .globl machine_check_fwnmi
+machine_check_fwnmi:
+      HMT_MEDIUM
+      mtspr   SPRG1,r13               /* save r13 */
+      RUNLATCH_ON(r13)
+      EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#ifdef CONFIG_PPC_ISERIES
+/***  ISeries-LPAR interrupt handlers ***/
+
+	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
+
+	.globl data_access_iSeries
+data_access_iSeries:
+	mtspr	SPRG1,r13
+BEGIN_FTR_SECTION
+	mtspr	SPRG2,r12
+	mfspr	r13,DAR
+	mfspr	r12,DSISR
+	srdi	r13,r13,60
+	rlwimi	r13,r12,16,0x20
+	mfcr	r12
+	cmpwi	r13,0x2c
+	beq	.do_stab_bolted_iSeries
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
+	EXCEPTION_PROLOG_ISERIES_2
+	b	data_access_common
+
+.do_stab_bolted_iSeries:
+	mtcrf	0x80,r12
+	mfspr	r12,SPRG2
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	EXCEPTION_PROLOG_ISERIES_2
+	b	.do_stab_bolted
+
+	.globl	data_access_slb_iSeries
+data_access_slb_iSeries:
+	mtspr	SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	mfspr	r3,DAR
+	b	.do_slb_miss
+
+	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
+
+	.globl	instruction_access_slb_iSeries
+instruction_access_slb_iSeries:
+	mtspr	SPRG1,r13		/* save r13 */
+	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	std	r3,PACA_EXSLB+EX_R3(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	ld	r3,PACALPPACA+LPPACASRR0(r13)
+	b	.do_slb_miss
+
+	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
+	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
+	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
+	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
+
+	.globl	system_call_iSeries
+system_call_iSeries:
+	mr	r9,r13
+	mfspr	r13,SPRG3
+	EXCEPTION_PROLOG_ISERIES_2
+	b	system_call_common
+
+	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
+	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
+
+	.globl system_reset_iSeries
+system_reset_iSeries:
+	mfspr	r13,SPRG3		/* Get paca address */
+	mfmsr	r24
+	ori	r24,r24,MSR_RI
+	mtmsrd	r24			/* RI on */
+	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
+	cmpwi	0,r24,0			/* Are we processor 0? */
+	beq	.__start_initialization_iSeries	/* Start up the first processor */
+	mfspr	r4,SPRN_CTRLF
+	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
+	andc	r4,r4,r5
+	mtspr	SPRN_CTRLT,r4
+
+1:
+	HMT_LOW
+#ifdef CONFIG_SMP
+	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
+					 * should start */
+	sync
+	LOADADDR(r3,current_set)
+	sldi	r28,r24,3		/* get current_set[cpu#] */
+	ldx	r3,r3,r28
+	addi	r1,r3,THREAD_SIZE
+	subi	r1,r1,STACK_FRAME_OVERHEAD
+
+	cmpwi	0,r23,0
+	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
+	bne	.__secondary_start		/* Loop until told to go */
+iSeries_secondary_smp_loop:
+	/* Let the Hypervisor know we are alive */
+	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+	lis	r3,0x8002
+	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
+#else /* CONFIG_SMP */
+	/* Yield the processor.  This is required for non-SMP kernels
+	 * which are running on multi-threaded machines. */
+	lis	r3,0x8000
+	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
+	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
+	li	r4,0			/* "yield timed" */
+	li	r5,-1			/* "yield forever" */
+#endif /* CONFIG_SMP */
+	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
+	sc				/* Invoke the hypervisor via a system call */
+	mfspr	r13,SPRG3		/* Put r13 back ???? */
+	b	1b			/* If SMP not configured, secondaries
+					 * loop forever */
+
+	.globl decrementer_iSeries_masked
+decrementer_iSeries_masked:
+	li	r11,1
+	stb	r11,PACALPPACA+LPPACADECRINT(r13)
+	lwz	r12,PACADEFAULTDECR(r13)
+	mtspr	SPRN_DEC,r12
+	/* fall through */
+
+	.globl hardware_interrupt_iSeries_masked
+hardware_interrupt_iSeries_masked:
+	mtcrf	0x80,r9		/* Restore regs */
+	ld	r11,PACALPPACA+LPPACASRR0(r13)
+	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	ld	r11,PACA_EXGEN+EX_R11(r13)
+	ld	r12,PACA_EXGEN+EX_R12(r13)
+	ld	r13,PACA_EXGEN+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+#endif /* CONFIG_PPC_ISERIES */
+
+/*** Common interrupt handlers ***/
+
+	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+	/*
+	 * Machine check is different because we use a different
+	 * save area: PACA_EXMC instead of PACA_EXGEN.
+	 */
+	.align	7
+	.globl machine_check_common
+machine_check_common:
+	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	DISABLE_INTS
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.machine_check_exception
+	b	.ret_from_except
+
+	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,64+INT_FRAME_SIZE
+	std	r9,_CCR(r1)
+	std	r10,GPR1(r1)
+	std	r11,_NIP(r1)
+	std	r12,_MSR(r1)
+	mfspr	r11,DAR
+	mfspr	r12,DSISR
+	std	r11,_DAR(r1)
+	std	r12,_DSISR(r1)
+	mflr	r10
+	mfctr	r11
+	mfxer	r12
+	std	r10,_LINK(r1)
+	std	r11,_CTR(r1)
+	std	r12,_XER(r1)
+	SAVE_GPR(0,r1)
+	SAVE_GPR(2,r1)
+	SAVE_4GPRS(3,r1)
+	SAVE_2GPRS(7,r1)
+	SAVE_10GPRS(12,r1)
+	SAVE_10GPRS(22,r1)
+	addi	r11,r1,INT_FRAME_SIZE
+	std	r11,0(r1)
+	li	r12,0
+	std	r12,0(r11)
+	ld	r2,PACATOC(r13)
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.kernel_bad_stack
+	b	1b
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+fast_exception_return:
+	ld	r12,_MSR(r1)
+	ld	r11,_NIP(r1)
+	andi.	r3,r12,MSR_RI		/* check if RI is set */
+	beq-	unrecov_fer
+	ld	r3,_CCR(r1)
+	ld	r4,_LINK(r1)
+	ld	r5,_CTR(r1)
+	ld	r6,_XER(r1)
+	mtcr	r3
+	mtlr	r4
+	mtctr	r5
+	mtxer	r6
+	REST_GPR(0, r1)
+	REST_8GPRS(2, r1)
+
+	mfmsr	r10
+	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
+	mtmsrd	r10,1
+
+	mtspr	SRR1,r12
+	mtspr	SRR0,r11
+	REST_4GPRS(10, r1)
+	ld	r1,GPR1(r1)
+	rfid
+	b	.	/* prevent speculative execution */
+
+unrecov_fer:
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+	.align	7
+	.globl data_access_common
+data_access_common:
+	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
+	mfspr	r10,DAR
+	std	r10,PACA_EXGEN+EX_DAR(r13)
+	mfspr	r10,DSISR
+	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+	ld	r3,PACA_EXGEN+EX_DAR(r13)
+	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	li	r5,0x300
+	b	.do_hash_page	 	/* Try to handle as hpte fault */
+
+	.align	7
+	.globl instruction_access_common
+instruction_access_common:
+	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+	ld	r3,_NIP(r1)
+	andis.	r4,r12,0x5820
+	li	r5,0x400
+	b	.do_hash_page		/* Try to handle as hpte fault */
+
+	.align	7
+	.globl hardware_interrupt_common
+	.globl hardware_interrupt_entry
+hardware_interrupt_common:
+	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+hardware_interrupt_entry:
+	DISABLE_INTS
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_IRQ
+	b	.ret_from_except_lite
+
+	.align	7
+	.globl alignment_common
+alignment_common:
+	mfspr	r10,DAR
+	std	r10,PACA_EXGEN+EX_DAR(r13)
+	mfspr	r10,DSISR
+	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+	ld	r3,PACA_EXGEN+EX_DAR(r13)
+	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.alignment_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl program_check_common
+program_check_common:
+	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.program_check_exception
+	b	.ret_from_except
+
+	.align	7
+	.globl fp_unavailable_common
+fp_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+	bne	.load_up_fpu		/* if from user, just load it up */
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.kernel_fp_unavailable_exception
+	BUG_OPCODE
+
+/*
+ * load_up_fpu(unused, unused, tsk)
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * On SMP we know the fpu is free, since we give it up every
+ * switch (ie, no lazy save of the FP registers).
+ * On entry: r13 == 'current' && last_task_used_math != 'current'
+ */
+_STATIC(load_up_fpu)
+	mfmsr	r5			/* grab the current MSR */
+	ori	r5,r5,MSR_FP
+	mtmsrd	r5			/* enable use of fpu now */
+	isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_fpu in switch_to.
+ *
+ */
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_math@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Save FP state to last_task_used_math's THREAD struct */
+	addi	r4,r4,THREAD
+	SAVE_32FPRS(0, r4)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR(r4)
+	/* Disable FP for last_task_used_math */
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r6,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r6
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	ld	r4,THREAD_FPEXC_MODE(r5)
+	ori	r12,r12,MSR_FP
+	or	r12,r12,r4
+	std	r12,_MSR(r1)
+	lfd	fr0,THREAD_FPSCR(r5)
+	mtfsf	0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	b	fast_exception_return
+
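+/*
+ * In C terms, the lazy-FP switch above is roughly the following
+ * (illustrative sketch only; the helper names here are made up,
+ * not real kernel functions):
+ *
+ *	mtmsrd(mfmsr() | MSR_FP);	// let the kernel touch the FPU
+ *#ifndef CONFIG_SMP
+ *	if (last_task_used_math) {
+ *		struct thread_struct *t = &last_task_used_math->thread;
+ *		save_fprs_and_fpscr(t);	// stash the old owner's state
+ *		t->regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ *	}				// so it faults next time it uses FP
+ *#endif
+ *	regs->msr |= MSR_FP | current->thread.fpexc_mode;
+ *	restore_fpscr_and_fprs(&current->thread);
+ *#ifndef CONFIG_SMP
+ *	last_task_used_math = current;
+ *#endif
+ */
+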
+	.align	7
+	.globl altivec_unavailable_common
+altivec_unavailable_common:
+	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+	bne	.load_up_altivec	/* if from user, just load it up */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	ENABLE_INTS
+	bl	.altivec_unavailable_exception
+	b	.ret_from_except
+
+#ifdef CONFIG_ALTIVEC
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ * On entry: r13 == 'current' && last_task_used_altivec != 'current'
+ */
+_STATIC(load_up_altivec)
+	mfmsr	r5			/* grab the current MSR */
+	oris	r5,r5,MSR_VEC@h
+	mtmsrd	r5			/* enable use of VMX now */
+	isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_altivec@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Save VMX state to last_task_used_altivec's THREAD struct */
+	addi	r4,r4,THREAD
+	SAVE_32VRS(0,r5,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	/* Disable VMX for last_task_used_altivec */
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VEC@h
+	andc	r4,r4,r6
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* Hack: if we get an altivec unavailable trap with VRSAVE
+	 * set to all zeros, we assume this is a broken application
+	 * that fails to set it properly, and thus we switch it to
+	 * all 1's
+	 */
+	mfspr	r4,SPRN_VRSAVE
+	cmpdi	0,r4,0
+	bne+	1f
+	li	r4,-1
+	mtspr	SPRN_VRSAVE,r4
+1:
+	/* enable use of VMX after return */
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	oris	r12,r12,MSR_VEC@h
+	std	r12,_MSR(r1)
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_altivec to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	b	fast_exception_return
+#endif /* CONFIG_ALTIVEC */
+
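+/*
+ * The VRSAVE fixup in load_up_altivec above, in C (sketch):
+ *
+ *	if (mfspr(SPRN_VRSAVE) == 0)		// app never set VRSAVE,
+ *		mtspr(SPRN_VRSAVE, ~0UL);	// so assume every VR is live
+ */
+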
+/*
+ * Hash table stuff
+ */
+	.align	7
+_GLOBAL(do_hash_page)
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+
+	andis.	r0,r4,0xa450		/* weird error? */
+	bne-	.handle_page_fault	/* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
+	bne-	.do_ste_alloc		/* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+	/*
+	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+	 * accessing a userspace segment (even from the kernel). We assume
+	 * kernel addresses always have the high bit set.
+	 */
+	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
+	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
+	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
+	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
+	ori	r4,r4,1			/* add _PAGE_PRESENT */
+	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
+
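+	/*
+	 * Equivalent C for the flag computation above (sketch; the
+	 * macro names stand in for the real bit positions):
+	 *
+	 *	access  = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
+	 *	if ((regs->msr & MSR_PR) || !(ea >> 63))
+	 *		access |= _PAGE_USER;	// user mode or user segment
+	 *	access |= _PAGE_PRESENT;
+	 *	if (trap == 0x400)
+	 *		access |= _PAGE_EXEC;	// instruction fault
+	 */
+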
+	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * r3 contains the faulting address
+	 * r4 contains the required access permissions
+	 * r5 contains the trap number
+	 *
+	 * at return r3 = 0 for success
+	 */
+	bl	.hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if hash_page succeeded */
+
+#ifdef DO_SOFT_DISABLE
+	/*
+	 * If we had interrupts soft-enabled at the point where the
+	 * DSI/ISI occurred, and an interrupt came in during hash_page,
+	 * handle it now.
+	 * We jump to ret_from_except_lite rather than fast_exception_return
+	 * because ret_from_except_lite will check for and handle pending
+	 * interrupts if necessary.
+	 */
+	beq	.ret_from_except_lite
+	/* For a hash failure, we don't bother re-enabling interrupts */
+	ble-	12f
+
+	/*
+	 * hash_page couldn't handle it, set soft interrupt enable back
+	 * to what it was before the trap.  Note that .local_irq_restore
+	 * handles any interrupts pending at this point.
+	 */
+	ld	r3,SOFTE(r1)
+	bl	.local_irq_restore
+	b	11f
+#else
+	beq	fast_exception_return   /* Return from exception on success */
+	ble-	12f			/* Failure return from hash_page */
+
+	/* fall through */
+#endif
+
+/* Here we have a page fault that hash_page can't handle. */
+_GLOBAL(handle_page_fault)
+	ENABLE_INTS
+11:	ld	r4,_DAR(r1)
+	ld	r5,_DSISR(r1)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_page_fault
+	cmpdi	r3,0
+	beq+	.ret_from_except_lite
+	bl	.save_nvgprs
+	mr	r5,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	lwz	r4,_DAR(r1)
+	bl	.bad_page_fault
+	b	.ret_from_except
+
+/* We have a page fault that hash_page could handle but HV refused
+ * the PTE insertion
+ */
+12:	bl	.save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	lwz	r4,_DAR(r1)
+	bl	.low_hash_fault
+	b	.ret_from_except
+
+	/* here we have a segment miss */
+_GLOBAL(do_ste_alloc)
+	bl	.ste_allocate		/* try to insert stab entry */
+	cmpdi	r3,0
+	beq+	fast_exception_return
+	b	.handle_page_fault
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ */
+	.align	7
+_GLOBAL(do_stab_bolted)
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
+
+	/* Hash to the primary group */
+	ld	r10,PACASTABVIRT(r13)
+	mfspr	r11,DAR
+	srdi	r11,r11,28
+	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
+
+	/* Calculate VSID */
+	/* This is a kernel address, so protovsid = ESID */
+	ASM_VSID_SCRAMBLE(r11, r9)
+	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
+
+	/* Search the primary group for a free entry */
+1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
+	andi.	r11,r11,0x80
+	beq	2f
+	addi	r10,r10,16
+	andi.	r11,r10,0x70
+	bne	1b
+
+	/* Stick to only searching the primary group for now.		*/
+	/* At least for now, we use a very simple random castout scheme */
+	/* Use the TB as a random number; OR in 1 to avoid entry 0	*/
+	mftb	r11
+	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
+	ori	r11,r11,0x10
+
+	/* r10 currently points to an ste one past the group of interest */
+	/* make it point to the randomly selected entry			*/
+	subi	r10,r10,128
+	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
+
+	isync			/* mark the entry invalid		*/
+	ld	r11,0(r10)
+	rldicl	r11,r11,56,1	/* clear the valid bit */
+	rotldi	r11,r11,8
+	std	r11,0(r10)
+	sync
+
+	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
+	slbie	r11
+
+2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
+	eieio
+
+	mfspr	r11,DAR		/* Get the new esid			*/
+	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
+	ori	r11,r11,0x90	/* Turn on valid and kp			*/
+	std	r11,0(r10)	/* Put new entry back into the stab	*/
+
+	sync
+
+	/* All done -- return from exception. */
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
+
+	andi.	r10,r12,MSR_RI
+	beq-	unrecov_slb
+
+	mtcrf	0x80,r9			/* restore CR */
+
+	mfmsr	r10
+	clrrdi	r10,r10,2
+	mtmsrd	r10,1
+
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
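+/*
+ * The STE allocation above, approximately in C (sketch; the structure
+ * and constant names are illustrative, following the comments in the
+ * code rather than any real header):
+ *
+ *	group = stab_virt + ((ea >> 28) & 0x1f) * 128;	// 8 STEs of 16 bytes
+ *	vsid = vsid_scramble(ea >> 28) << 12;	// kernel: protovsid == esid
+ *	for (ste = group; ste < group + 8; ste++)
+ *		if (!(ste->esid_data & STE_VALID))
+ *			goto found;
+ *	// Group full: cast out a pseudo-random victim (never entry 0),
+ *	// using the timebase as the random number.
+ *	ste = group + ((((mftb() << 4) & 0x70) | 0x10) / 16);
+ *	invalidate(ste);		// clear valid, slbie the old esid
+ * found:
+ *	ste->vsid_data = vsid;		// second dword first, then eieio
+ *	ste->esid_data = (ea & ~0xfffffffUL) | STE_VALID | STE_KP;
+ */
+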
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(do_slb_miss)
+	mflr	r10
+
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
+
+	bl	.slb_allocate			/* handle it */
+
+	/* All done -- return from exception. */
+
+	ld	r10,PACA_EXSLB+EX_LR(r13)
+	ld	r3,PACA_EXSLB+EX_R3(r13)
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
+#endif /* CONFIG_PPC_ISERIES */
+
+	mtlr	r10
+
+	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+	beq-	unrecov_slb
+
+.machine	push
+.machine	"power4"
+	mtcrf	0x80,r9
+	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
+.machine	pop
+
+#ifdef CONFIG_PPC_ISERIES
+	mtspr	SRR0,r11
+	mtspr	SRR1,r12
+#endif /* CONFIG_PPC_ISERIES */
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+unrecov_slb:
+	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+	DISABLE_INTS
+	bl	.save_nvgprs
+1:	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unrecoverable_exception
+	b	1b
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap in LparData.c), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_PHYS_ADDR	/* 0x6000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
+
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+	. = 0x7000
+	.globl fwnmi_data_area
+fwnmi_data_area:
+
+	/* iSeries does not use the FWNMI stuff, so it is safe to put
+	 * this here, even if we later allow kernels that will boot on
+	 * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+        . = LPARMAP_PHYS
+#include "lparmap.s"
+/*
+ * This ".text" is here for old compilers that generate a trailing
+ * .note section when compiling .c files to .s
+ */
+	.text
+#endif /* CONFIG_PPC_ISERIES */
+
+        . = 0x8000
+
+/*
+ * On pSeries, secondary processors spin in the following code.
+ * At entry, r3 = this processor's number (physical cpu id)
+ */
+_GLOBAL(pSeries_secondary_smp_init)
+	mr	r24,r3
+	
+	/* turn on 64-bit mode */
+	bl	.enable_64b_mode
+	isync
+
+	/* Copy some CPU settings from CPU 0 */
+	bl	.__restore_cpu_setup
+
+	/* Set up a paca value for this processor. Since we have the
+	 * physical cpu id in r24, we need to search the pacas to find
+	 * which logical id maps to our physical one.
+	 */
+	LOADADDR(r13, paca) 		/* Get base vaddr of paca array	 */
+	li	r5,0			/* logical cpu id                */
+1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
+	cmpw	r6,r24			/* Compare to our id             */
+	beq	2f
+	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
+	addi	r5,r5,1
+	cmpwi	r5,NR_CPUS
+	blt	1b
+
+	mr	r3,r24			/* not found, copy phys to r3	 */
+	b	.kexec_wait		/* next kernel might do better	 */
+
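+	/*
+	 * The search above, in C (sketch; the field name is approximate):
+	 *
+	 *	for (i = 0; i < NR_CPUS; i++)
+	 *		if (paca[i].hw_cpu_id == phys_id)
+	 *			goto found;		// r13 = &paca[i]
+	 *	kexec_wait(phys_id);			// no match, park here
+	 */
+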
+2:	mtspr	SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
+	/* From now on, r24 is expected to be logical cpuid */
+	mr	r24,r5
+3:	HMT_LOW
+	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
+					/* start.			 */
+	sync
+
+	/* Create a temp kernel stack for use before relocation is on.	*/
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,STACK_FRAME_OVERHEAD
+
+	cmpwi	0,r23,0
+#ifdef CONFIG_SMP
+	bne	.__secondary_start
+#endif
+	b 	3b			/* Loop until told to go	 */
+
+#ifdef CONFIG_PPC_ISERIES
+_STATIC(__start_initialization_iSeries)
+	/* Clear out the BSS */
+	LOADADDR(r11,__bss_stop)
+	LOADADDR(r8,__bss_start)
+	sub	r11,r11,r8		/* bss size			*/
+	addi	r11,r11,7		/* round up to an even double word */
+	rldicl. r11,r11,61,3		/* shift right by 3		*/
+	beq	4f
+	addi	r8,r8,-8
+	li	r0,0
+	mtctr	r11			/* zero this many doublewords	*/
+3:	stdu	r0,8(r8)
+	bdnz	3b
+4:
+	LOADADDR(r1,init_thread_union)
+	addi	r1,r1,THREAD_SIZE
+	li	r0,0
+	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
+
+	LOADADDR(r3,cpu_specs)
+	LOADADDR(r4,cur_cpu_spec)
+	li	r5,0
+	bl	.identify_cpu
+
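+	/* The TOC pointer lives 0x8000 past __toc_start, but addi takes a
+	 * signed 16-bit immediate, so the offset is added as 0x4000 twice.
+	 */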
+	LOADADDR(r2,__toc_start)
+	addi	r2,r2,0x4000
+	addi	r2,r2,0x4000
+
+	bl	.iSeries_early_setup
+
+	/* relocation is on at this point */
+
+	b	.start_here_common
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+_STATIC(__mmu_off)
+	mfmsr	r3
+	andi.	r0,r3,MSR_IR|MSR_DR
+	beqlr
+	andc	r3,r3,r0
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	sync
+	rfid
+	b	.	/* prevent speculative execution */
+
+
+/*
+ * Here is our main kernel entry point. We currently support 2 kinds of entry
+ * depending on the value of r5.
+ *
+ *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
+ *                 in r3...r7
+ *   
+ *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
+ *                 DT block, r4 is a physical pointer to the kernel itself
+ *
+ */
+_GLOBAL(__start_initialization_multiplatform)
+	/*
+	 * Are we booted from a PROM OF-type client interface?
+	 */
+	cmpldi	cr0,r5,0
+	bne	.__boot_from_prom		/* yes -> prom */
+
+	/* Save parameters */
+	mr	r31,r3
+	mr	r30,r4
+
+	/* Make sure we are running in 64-bit mode */
+	bl	.enable_64b_mode
+
+	/* Set up some critical 970 SPRs before switching MMU off */
+	bl	.__970_cpu_preinit
+
+	/* cpu # */
+	li	r24,0
+
+	/* Switch off MMU if not already */
+	LOADADDR(r4, .__after_prom_start - KERNELBASE)
+	add	r4,r4,r30
+	bl	.__mmu_off
+	b	.__after_prom_start
+
+_STATIC(__boot_from_prom)
+	/* Save parameters */
+	mr	r31,r3
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+
+	/* Make sure we are running in 64-bit mode */
+	bl	.enable_64b_mode
+
+	/* put a relocation offset into r3 */
+	bl	.reloc_offset
+
+	LOADADDR(r2,__toc_start)
+	addi	r2,r2,0x4000
+	addi	r2,r2,0x4000
+
+	/* Relocate the TOC from a virt addr to a real addr */
+	sub	r2,r2,r3
+
+	/* Restore parameters */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+
+	/* Do all of the interaction with OF client interface */
+	bl	.prom_init
+	/* We never return */
+	trap
+
+/*
+ * At this point, r3 contains the physical address we are running at,
+ * returned by prom_init()
+ */
+_STATIC(__after_prom_start)
+
+/*
+ * We need to run with __start at physical address 0.
+ * This will leave some code in the first 256B of
+ * real memory, which are reserved for software use.
+ * The remainder of the first page is loaded with the fixed
+ * interrupt vectors.  The next two pages are filled with
+ * unknown exception placeholders.
+ *
+ * Note: This process overwrites the OF exception vectors.
+ *	r26 == relocation offset
+ *	r27 == KERNELBASE
+ */
+	bl	.reloc_offset
+	mr	r26,r3
+	SET_REG_TO_CONST(r27,KERNELBASE)
+
+	li	r3,0			/* target addr */
+
+	// XXX FIXME: Use phys returned by OF (r30)
+	sub	r4,r27,r26 		/* source addr			 */
+					/* current address of _start	 */
+					/*   i.e. where we are running	 */
+					/*	the source addr		 */
+
+	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
+	sub	r5,r5,r27
+
+	li	r6,0x100		/* Start offset, the first 0x100 */
+					/* bytes were copied earlier.	 */
+
+	bl	.copy_and_flush		/* copy the first n bytes	 */
+					/* this includes the code being	 */
+					/* executed here.		 */
+
+	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
+	mtctr	r0			/* that we just made/relocated	 */
+	bctr
+
+4:	LOADADDR(r5,klimit)
+	sub	r5,r5,r26
+	ld	r5,0(r5)		/* get the value of klimit */
+	sub	r5,r5,r27
+	bl	.copy_and_flush		/* copy the rest */
+	b	.start_here_multiplatform
+
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+/*
+ * Copy routine used to copy the kernel to start at physical address 0
+ * and flush and invalidate the caches as needed.
+ * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
+ * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
+ *
+ * Note: this routine *only* clobbers r0, r6 and lr
+ */
+_GLOBAL(copy_and_flush)
+	addi	r5,r5,-8
+	addi	r6,r6,-8
+4:	li	r0,16			/* Use the least common		*/
+					/* denominator cache line	*/
+					/* size.  This results in	*/
+					/* extra cache line flushes	*/
+					/* but operation is correct.	*/
+					/* Can't get cache line size	*/
+					/* from NACA as it is being	*/
+					/* moved too.			*/
+
+	mtctr	r0			/* put # of dwords/line in ctr	*/
+3:	addi	r6,r6,8			/* copy a cache line		*/
+	ldx	r0,r6,r4
+	stdx	r0,r6,r3
+	bdnz	3b
+	dcbst	r6,r3			/* write it to memory		*/
+	sync
+	icbi	r6,r3			/* flush the icache line	*/
+	cmpld	0,r6,r5
+	blt	4b
+	sync
+	addi	r5,r5,8
+	addi	r6,r6,8
+	blr
+
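+/*
+ * copy_and_flush in C, roughly (sketch):
+ *
+ *	while (offset < limit) {
+ *		for (i = 0; i < 16; i++, offset += 8)	// 128 bytes: the
+ *			*(u64 *)(dest + offset) =	// worst-case line size
+ *				*(u64 *)(src + offset);
+ *		dcbst(dest + offset);	// push the line out to memory
+ *		sync();
+ *		icbi(dest + offset);	// and toss any stale icache copy
+ *	}
+ */
+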
+.align 8
+copy_to_here:
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC_PMAC
+/*
+ * On PowerMac, secondary processors start from the reset vector, which
+ * is temporarily turned into a call to one of the functions below.
+ */
+	.section ".text";
+	.align 2 ;
+
+	.globl	pmac_secondary_start_1	
+pmac_secondary_start_1:	
+	li	r24, 1
+	b	.pmac_secondary_start
+	
+	.globl pmac_secondary_start_2
+pmac_secondary_start_2:	
+	li	r24, 2
+	b	.pmac_secondary_start
+	
+	.globl pmac_secondary_start_3
+pmac_secondary_start_3:
+	li	r24, 3
+	b	.pmac_secondary_start
+	
+_GLOBAL(pmac_secondary_start)
+	/* turn on 64-bit mode */
+	bl	.enable_64b_mode
+	isync
+
+	/* Copy some CPU settings from CPU 0 */
+	bl	.__restore_cpu_setup
+
+	/* pSeries does this early, though I don't think we really need it */
+	mfmsr	r3
+	ori	r3,r3,MSR_RI
+	mtmsrd	r3			/* RI on */
+
+	/* Set up a paca value for this processor. */
+	LOADADDR(r4, paca) 		 /* Get base vaddr of paca array	*/
+	mulli	r13,r24,PACA_SIZE	 /* Calculate vaddr of right paca */
+	add	r13,r13,r4		/* for this processor.		*/
+	mtspr	SPRG3,r13		 /* Save vaddr of paca in SPRG3	*/
+
+	/* Create a temp kernel stack for use before relocation is on.	*/
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,STACK_FRAME_OVERHEAD
+
+	b	.__secondary_start
+
+#endif /* CONFIG_PPC_PMAC */
+
+/*
+ * This function is called after the master CPU has released the
+ * secondary processors.  The execution environment has relocation off.
+ * The paca for this processor has the following fields initialized at
+ * this point:
+ *   1. Processor number
+ *   2. Segment table pointer (virtual address)
+ * On entry the following are set:
+ *   r1	= stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
+ *   r24   = cpu# (in Linux terms)
+ *   r13   = paca virtual address
+ *   SPRG3 = paca virtual address
+ */
+_GLOBAL(__secondary_start)
+
+	HMT_MEDIUM			/* Set thread priority to MEDIUM */
+
+	ld	r2,PACATOC(r13)
+	li	r6,0
+	stb	r6,PACAPROCENABLED(r13)
+
+#ifndef CONFIG_PPC_ISERIES
+	/* Initialize the page table pointer register. */
+	LOADADDR(r6,_SDR1)
+	ld	r6,0(r6)		/* get the value of _SDR1	 */
+	mtspr	SDR1,r6			/* set the htab location	 */
+#endif
+	/* Initialize the first segment table (or SLB) entry		 */
+	ld	r3,PACASTABVIRT(r13)	/* get addr of segment table	 */
+	bl	.stab_initialize
+
+	/* Initialize the kernel stack.  Just a repeat for iSeries.	 */
+	LOADADDR(r3,current_set)
+	sldi	r28,r24,3		/* get current_set[cpu#]	 */
+	ldx	r1,r3,r28
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	std	r1,PACAKSAVE(r13)
+
+	ld	r3,PACASTABREAL(r13)	/* get raddr of segment table	 */
+	ori	r4,r3,1			/* turn on valid bit		 */
+
+#ifdef CONFIG_PPC_ISERIES
+	li	r0,-1			/* hypervisor call */
+	li	r3,1
+	sldi	r3,r3,63		/* 0x8000000000000000 */
+	ori	r3,r3,4			/* 0x8000000000000004 */
+	sc				/* HvCall_setASR */
+#else
+	/* set the ASR */
+	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg	 */
+	ld	r3,0(r3)
+	lwz	r3,PLATFORM(r3)		/* r3 = platform flags		 */
+	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
+	beq	98f			/* branch if result is 0  */
+	mfspr	r3,PVR
+	srwi	r3,r3,16
+	cmpwi	r3,0x37			/* SStar  */
+	beq	97f
+	cmpwi	r3,0x36			/* IStar  */
+	beq	97f
+	cmpwi	r3,0x34			/* Pulsar */
+	bne	98f
+97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
+	HVSC				/* Invoking hcall */
+	b	99f
+98:					/* !(rpa hypervisor) || !(star)  */
+	mtasr	r4			/* set the stab location	 */
+99:
+#endif
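+	/*
+	 * I.e. for the non-iSeries path above (sketch; cpu_is_star() is a
+	 * made-up predicate for the SStar/IStar/Pulsar PVR checks):
+	 *
+	 *	if ((systemcfg->platform & PLATFORM_LPAR) && cpu_is_star())
+	 *		plpar_hcall_norets(H_SET_ASR, stab_real | 1);
+	 *	else
+	 *		mtspr(SPRN_ASR, stab_real | 1);
+	 */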
+	li	r7,0
+	mtlr	r7
+
+	/* enable MMU and jump to start_secondary */
+	LOADADDR(r3,.start_secondary_prolog)
+	SET_REG_TO_CONST(r4, MSR_KERNEL)
+#ifdef DO_SOFT_DISABLE
+	ori	r4,r4,MSR_EE
+#endif
+	mtspr	SRR0,r3
+	mtspr	SRR1,r4
+	rfid
+	b	.	/* prevent speculative execution */
+
+/* 
+ * Running with relocation on at this point.  All we want to do is
+ * zero the stack back-chain pointer before going into C code.
+ */
+_GLOBAL(start_secondary_prolog)
+	li	r3,0
+	std	r3,0(r1)		/* Zero the stack frame pointer	*/
+	bl	.start_secondary
+#endif
+
+/*
+ * This subroutine clobbers r11 and r12
+ */
+_GLOBAL(enable_64b_mode)
+	mfmsr	r11			/* grab the current MSR */
+	li	r12,1
+	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+	or	r11,r11,r12
+	li	r12,1
+	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+	or	r11,r11,r12
+	mtmsrd	r11
+	isync
+	blr
+
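+/*
+ * In C terms (sketch):
+ *
+ *	unsigned long msr = mfmsr();
+ *	msr |= 1UL << MSR_SF_LG;	// 64-bit mode
+ *	msr |= 1UL << MSR_ISF_LG;	// 64-bit mode for exceptions
+ *	mtmsrd(msr);
+ */
+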
+#ifdef CONFIG_PPC_MULTIPLATFORM
+/*
+ * This is where the main kernel code starts.
+ */
+_STATIC(start_here_multiplatform)
+	/* get a new offset, now that the kernel has moved. */
+	bl	.reloc_offset
+	mr	r26,r3
+
+	/* Clear out the BSS. It may have been done in prom_init
+	 * already, but that's irrelevant since prom_init will soon
+	 * be detached from the kernel completely. Besides, we need
+	 * to clear it now for kexec-style entry.
+	 */
+	LOADADDR(r11,__bss_stop)
+	LOADADDR(r8,__bss_start)
+	sub	r11,r11,r8		/* bss size			*/
+	addi	r11,r11,7		/* round up to an even double word */
+	rldicl. r11,r11,61,3		/* shift right by 3		*/
+	beq	4f
+	addi	r8,r8,-8
+	li	r0,0
+	mtctr	r11			/* zero this many doublewords	*/
+3:	stdu	r0,8(r8)
+	bdnz	3b
+4:
+
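+	/*
+	 * The loop above is just (sketch):
+	 *
+	 *	memset(__bss_start, 0,
+	 *	       (__bss_stop - __bss_start + 7) & ~7UL);
+	 *
+	 * done a doubleword at a time with stdu.
+	 */
+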
+	mfmsr	r6
+	ori	r6,r6,MSR_RI
+	mtmsrd	r6			/* RI on */
+
+#ifdef CONFIG_HMT
+	/* Start up the second thread on cpu 0 */
+	mfspr	r3,PVR
+	srwi	r3,r3,16
+	cmpwi	r3,0x34			/* Pulsar  */
+	beq	90f
+	cmpwi	r3,0x36			/* Icestar */
+	beq	90f
+	cmpwi	r3,0x37			/* SStar   */
+	beq	90f
+	b	91f			/* HMT not supported */
+90:	li	r3,0
+	bl	.hmt_start_secondary
+91:
+#endif
+
+	/* The following gets the stack and TOC set up with the regs */
+	/* pointing to the real addr of the kernel stack.  This is   */
+	/* all done to support the C function call below which sets  */
+	/* up the htab.  This is done because we have relocated the  */
+	/* kernel but are still running in real mode. */
+
+	LOADADDR(r3,init_thread_union)
+	sub	r3,r3,r26
+
+	/* set up a stack pointer (physical address) */
+	addi	r1,r3,THREAD_SIZE
+	li	r0,0
+	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
+
+	/* set up the TOC (physical address) */
+	LOADADDR(r2,__toc_start)
+	addi	r2,r2,0x4000
+	addi	r2,r2,0x4000
+	sub	r2,r2,r26
+
+	LOADADDR(r3,cpu_specs)
+	sub	r3,r3,r26
+	LOADADDR(r4,cur_cpu_spec)
+	sub	r4,r4,r26
+	mr	r5,r26
+	bl	.identify_cpu
+
+	/* Save some low level config HIDs of CPU0 to be copied to
+	 * other CPUs later on, or used for suspend/resume
+	 */
+	bl	.__save_cpu_setup
+	sync
+
+	/* Set up a valid physical PACA pointer in SPRG3 for early_setup.
+	 * Note that boot_cpuid is always 0 nowadays since there is
+	 * nowhere it could be initialized differently before we reach
+	 * this code.
+	 */
+	LOADADDR(r27, boot_cpuid)
+	sub	r27,r27,r26
+	lwz	r27,0(r27)
+
+	LOADADDR(r24, paca) 		/* Get base vaddr of paca array	 */
+	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
+	add	r13,r13,r24		/* for this processor.		 */
+	sub	r13,r13,r26		/* convert to physical addr	 */
+	mtspr	SPRG3,r13		/* PPPBBB: Temp... -Peter */
+	
+	/* Do very early kernel initializations, including initial hash table,
+	 * stab and slb setup before we turn on relocation.	*/
+
+	/* Restore parameters passed from prom_init/kexec */
+	mr	r3,r31
+ 	bl	.early_setup
+
+	/* set the ASR */
+	ld	r3,PACASTABREAL(r13)
+	ori	r4,r3,1			/* turn on valid bit		 */
+	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
+	ld	r3,0(r3)
+	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
+	andi.	r3,r3,PLATFORM_LPAR	/* Test if bit 0 is set (LPAR bit) */
+	beq	98f			/* branch if result is 0  */
+	mfspr	r3,PVR
+	srwi	r3,r3,16
+	cmpwi	r3,0x37			/* SStar */
+	beq	97f
+	cmpwi	r3,0x36			/* IStar  */
+	beq	97f
+	cmpwi	r3,0x34			/* Pulsar */
+	bne	98f
+97:	li	r3,H_SET_ASR		/* hcall = H_SET_ASR */
+	HVSC				/* Invoking hcall */
+	b	99f
+98:					/* !(rpa hypervisor) || !(star) */
+	mtasr	r4			/* set the stab location	*/
+99:
+	/* Set SDR1 (hash table pointer) */
+	ld	r3,systemcfg@got(r2)	/* r3 = ptr to systemcfg */
+	ld	r3,0(r3)
+	lwz	r3,PLATFORM(r3)		/* r3 = platform flags */
+	/* Test if bit 0 is set (LPAR bit) */
+	andi.	r3,r3,PLATFORM_LPAR
+	bne	98f			/* branch if result is !0  */
+	LOADADDR(r6,_SDR1)		/* Only if NOT LPAR */
+	sub	r6,r6,r26
+	ld	r6,0(r6)		/* get the value of _SDR1 */
+	mtspr	SDR1,r6			/* set the htab location  */
+98: 
+	LOADADDR(r3,.start_here_common)
+	SET_REG_TO_CONST(r4, MSR_KERNEL)
+	mtspr	SRR0,r3
+	mtspr	SRR1,r4
+	rfid
+	b	.	/* prevent speculative execution */
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+	
+	/* This is where all platforms converge execution */
+_STATIC(start_here_common)
+	/* relocation is on at this point */
+
+	/* The following code sets up the SP and TOC now that we are */
+	/* running with translation enabled. */
+
+	LOADADDR(r3,init_thread_union)
+
+	/* set up the stack */
+	addi	r1,r3,THREAD_SIZE
+	li	r0,0
+	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
+
+	/* Apply the CPU-specific fixups (nop out sections not relevant
+	 * to this CPU).
+	 */
+	li	r3,0
+	bl	.do_cpu_ftr_fixups
+
+	LOADADDR(r26, boot_cpuid)
+	lwz	r26,0(r26)
+
+	LOADADDR(r24, paca) 		/* Get base vaddr of paca array  */
+	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
+	add	r13,r13,r24		/* for this processor.		 */
+	mtspr	SPRG3,r13
+
+	/* ptr to current */
+	LOADADDR(r4,init_task)
+	std	r4,PACACURRENT(r13)
+
+	/* Load the TOC */
+	ld	r2,PACATOC(r13)
+	std	r1,PACAKSAVE(r13)
+
+	bl	.setup_system
+
+	/* Load up the kernel context */
+5:
+#ifdef DO_SOFT_DISABLE
+	li	r5,0
+	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
+	mfmsr	r5
+	ori	r5,r5,MSR_EE		/* Hard Enabled */
+	mtmsrd	r5
+#endif
+
+	bl .start_kernel
+
+_GLOBAL(hmt_init)
+#ifdef CONFIG_HMT
+	LOADADDR(r5, hmt_thread_data)
+	mfspr	r7,PVR
+	srwi	r7,r7,16
+	cmpwi	r7,0x34			/* Pulsar  */
+	beq	90f
+	cmpwi	r7,0x36			/* Icestar */
+	beq	91f
+	cmpwi	r7,0x37			/* SStar   */
+	beq	91f
+	b	101f
+90:	mfspr	r6,PIR
+	andi.	r6,r6,0x1f
+	b	92f
+91:	mfspr	r6,PIR
+	andi.	r6,r6,0x3ff
+92:	sldi	r4,r24,3
+	stwx	r6,r5,r4
+	bl	.hmt_start_secondary
+	b	101f
+
+__hmt_secondary_hold:
+	LOADADDR(r5, hmt_thread_data)
+	clrldi	r5,r5,4
+	li	r7,0
+	mfspr	r6,PIR
+	mfspr	r8,PVR
+	srwi	r8,r8,16
+	cmpwi	r8,0x34
+	bne	93f
+	andi.	r6,r6,0x1f
+	b	103f
+93:	andi.	r6,r6,0x3f
+
+103:	lwzx	r8,r5,r7
+	cmpw	r8,r6
+	beq	104f
+	addi	r7,r7,8
+	b	103b
+
+104:	addi	r7,r7,4
+	lwzx	r9,r5,r7
+	mr	r24,r9
+101:
+#endif
+	mr	r3,r24
+	b	.pSeries_secondary_smp_init
+
+#ifdef CONFIG_HMT
+_GLOBAL(hmt_start_secondary)
+	LOADADDR(r4,__hmt_secondary_hold)
+	clrldi	r4,r4,4
+	mtspr	NIADORM, r4
+	mfspr	r4, MSRDORM
+	li	r5, -65
+	and	r4, r4, r5
+	mtspr	MSRDORM, r4
+	lis	r4,0xffef
+	ori	r4,r4,0x7403
+	mtspr	TSC, r4
+	li	r4,0x1f4
+	mtspr	TST, r4
+	mfspr	r4, HID0
+	ori	r4, r4, 0x1
+	mtspr	HID0, r4
+	mfspr	r4, SPRN_CTRLF
+	oris	r4, r4, 0x40
+	mtspr	SPRN_CTRLT, r4
+	blr
+#endif
+
+#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
+_GLOBAL(smp_release_cpus)
+	/* All secondary cpus are spinning on a common
+	 * spinloop, release them all now so they can start
+	 * to spin on their individual paca spinloops.
+	 * For non-SMP kernels, the secondary cpus never
+	 * get out of the common spinloop.
+	 */
+	li	r3,1
+	LOADADDR(r5,__secondary_hold_spinloop)
+	std	r3,0(r5)
+	sync
+	blr
+#endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */
+
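+/*
+ * smp_release_cpus() in C (sketch):
+ *
+ *	__secondary_hold_spinloop = 1;	// every secondary polls this word
+ *	mb();				// the sync above: make it visible
+ */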
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the bss, which is page-aligned.
+ */
+	.section ".bss"
+
+	.align	PAGE_SHIFT
+
+	.globl	empty_zero_page
+empty_zero_page:
+	.space	PAGE_SIZE
+
+	.globl	swapper_pg_dir
+swapper_pg_dir:
+	.space	PAGE_SIZE
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap.
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+	.globl	cmd_line
+cmd_line:
+	.space	COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644
index 0000000..cb1a3a5
--- /dev/null
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -0,0 +1,860 @@
+/*
+ *  arch/ppc/kernel/except_8xx.S
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *  MPC8xx modifications by Dan Malek
+ *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ *  This file contains low-level support and setup for PowerPC 8xx
+ *  embedded processors, including trap and interrupt dispatch.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* Macro to make the code more readable. */
+#ifdef CONFIG_8xx_CPU6
+#define DO_8xx_CPU6(val, reg)	\
+	li	reg, val;	\
+	stw	reg, 12(r0);	\
+	lwz	reg, 12(r0);
+#else
+#define DO_8xx_CPU6(val, reg)
+#endif
+	.text
+	.globl	_stext
+_stext:
+	.text
+	.globl	_start
+_start:
+
+/* MPC8xx
+ * This port was done on an MBX board with an 860.  Right now I only
+ * support an ELF compressed (zImage) boot from EPPC-Bug because the
+ * code there loads up some registers before calling us:
+ *   r3: ptr to board info data
+ *   r4: initrd_start or if no initrd then 0
+ *   r5: initrd_end - unused if r4 is 0
+ *   r6: Start of command line string
+ *   r7: End of command line string
+ *
+ * I decided to use conditional compilation instead of checking PVR and
+ * adding more processor specific branches around code I don't need.
+ * Since this is an embedded processor, I also appreciate any memory
+ * savings I can get.
+ *
+ * The MPC8xx does not have any BATs, but it supports large page sizes.
+ * We first initialize the MMU to support 8M byte pages, then load one
+ * entry into each of the instruction and data TLBs to map the first
+ * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
+ * the "internal" processor registers before MMU_init is called.
+ *
+ * The TLB code currently contains a major hack.  Since I use the condition
+ * code register, I have to save and restore it.  I am out of registers, so
+ * I just store it in memory location 0 (the TLB handlers are not reentrant).
+ * To avoid making any decisions, I need to use the "segment" valid bit
+ * in the first level table, but that would require many changes to the
+ * Linux page directory/table functions that I don't want to do right now.
+ *
+ * I used to use SPRG2 for a temporary register in the TLB handler, but it
+ * has since been put to other uses.  I now use a hack to save a register
+ * and the CCR at memory location 0.  Someday I'll fix this.
+ *	-- Dan
+ */
+	.globl	__start
+__start:
+	mr	r31,r3			/* save parameters */
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+
+	/* We have to turn on the MMU right away so we get cache modes
+	 * set correctly.
+	 */
+	bl	initial_mmu
+
+/* We now have the lower 8 Meg mapped into TLB entries, and the caches
+ * ready to work.
+ */
+
+turn_on_mmu:
+	mfmsr	r0
+	ori	r0,r0,MSR_DR|MSR_IR
+	mtspr	SPRN_SRR1,r0
+	lis	r0,start_here@h
+	ori	r0,r0,start_here@l
+	mtspr	SPRN_SRR0,r0
+	SYNC
+	rfi				/* enables MMU */
+
+/*
+ * Exception entry code.  This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG	\
+	mtspr	SPRN_SPRG0,r10;	\
+	mtspr	SPRN_SPRG1,r11;	\
+	mfcr	r10;		\
+	EXCEPTION_PROLOG_1;	\
+	EXCEPTION_PROLOG_2
+
+#define EXCEPTION_PROLOG_1	\
+	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
+	andi.	r11,r11,MSR_PR;	\
+	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
+	beq	1f;		\
+	mfspr	r11,SPRN_SPRG3;	\
+	lwz	r11,THREAD_INFO-THREAD(r11);	\
+	addi	r11,r11,THREAD_SIZE;	\
+	tophys(r11,r11);	\
+1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
+
+
+#define EXCEPTION_PROLOG_2	\
+	CLR_TOP32(r11);		\
+	stw	r10,_CCR(r11);		/* save registers */ \
+	stw	r12,GPR12(r11);	\
+	stw	r9,GPR9(r11);	\
+	mfspr	r10,SPRN_SPRG0;	\
+	stw	r10,GPR10(r11);	\
+	mfspr	r12,SPRN_SPRG1;	\
+	stw	r12,GPR11(r11);	\
+	mflr	r10;		\
+	stw	r10,_LINK(r11);	\
+	mfspr	r12,SPRN_SRR0;	\
+	mfspr	r9,SPRN_SRR1;	\
+	stw	r1,GPR1(r11);	\
+	stw	r1,0(r11);	\
+	tovirt(r1,r11);			/* set new kernel sp */	\
+	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
+	MTMSRD(r10);			/* (except for mach check in rtas) */ \
+	stw	r0,GPR0(r11);	\
+	SAVE_4GPRS(3, r11);	\
+	SAVE_2GPRS(7, r11)
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
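+/*
+ * What EXCEPTION_PROLOG does, in C-like terms (sketch):
+ *
+ *	if (srr1 & MSR_PR)			// from user: use the
+ *		frame = tophys((ulong)current_thread_info()  // kernel stack
+ *				+ THREAD_SIZE) - INT_FRAME_SIZE;
+ *	else					// from kernel: stay on r1
+ *		frame = tophys(r1) - INT_FRAME_SIZE;
+ *	save cr, r0-r12, lr, srr0 and srr1 into the frame;
+ *	r1 = tovirt(frame);			// new kernel stack pointer
+ */
+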
+/*
+ * Exception vectors.
+ */
+#define EXCEPTION(n, label, hdlr, xfer)		\
+	. = n;					\
+label:						\
+	EXCEPTION_PROLOG;			\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
+	xfer(n, hdlr)
+
+#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
+	li	r10,trap;					\
+	stw	r10,TRAP(r11);					\
+	li	r10,MSR_KERNEL;					\
+	copyee(r10, r9);					\
+	bl	tfer;						\
+i##n:								\
+	.long	hdlr;						\
+	.long	ret
+
+#define COPY_EE(d, s)		rlwimi d,s,0,16,16
+#define NOCOPY(d, s)
+
+#define EXC_XFER_STD(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
+			  ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
+			  ret_from_except)
+
+#define EXC_XFER_EE(n, hdlr)		\
+	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
+			  ret_from_except_full)
+
+#define EXC_XFER_EE_LITE(n, hdlr)	\
+	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
+			  ret_from_except)
+
+/* System reset */
+	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+
+/* Machine check */
+	. = 0x200
+MachineCheck:
+	EXCEPTION_PROLOG
+	mfspr r4,SPRN_DAR
+	stw r4,_DAR(r11)
+	mfspr r5,SPRN_DSISR
+	stw r5,_DSISR(r11)
+	addi r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_STD(0x200, MachineCheckException)
+
+/* Data access exception.
+ * This is "never generated" by the MPC8xx.  We jump to it for other
+ * translation errors.
+ */
+	. = 0x300
+DataAccess:
+	EXCEPTION_PROLOG
+	mfspr	r10,SPRN_DSISR
+	stw	r10,_DSISR(r11)
+	mr	r5,r10
+	mfspr	r4,SPRN_DAR
+	EXC_XFER_EE_LITE(0x300, handle_page_fault)
+
+/* Instruction access exception.
+ * This is "never generated" by the MPC8xx.  We jump to it for other
+ * translation errors.
+ */
+	. = 0x400
+InstructionAccess:
+	EXCEPTION_PROLOG
+	mr	r4,r12
+	mr	r5,r9
+	EXC_XFER_EE_LITE(0x400, handle_page_fault)
+
+/* External interrupt */
+	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+
+/* Alignment exception */
+	. = 0x600
+Alignment:
+	EXCEPTION_PROLOG
+	mfspr	r4,SPRN_DAR
+	stw	r4,_DAR(r11)
+	mfspr	r5,SPRN_DSISR
+	stw	r5,_DSISR(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE(0x600, AlignmentException)
+
+/* Program check exception */
+	EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+
+/* No FPU on MPC8xx.  This exception is not supposed to happen.
+ */
+	EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
+
+/* Decrementer */
+	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+
+	EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+
+/* System call */
+	. = 0xc00
+SystemCall:
+	EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0xc00, DoSyscall)
+
+/* Single step - not used on 601 */
+	EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
+	EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE)
+
+/* On the MPC8xx, this is a software emulation interrupt.  It occurs
+ * for all unimplemented and illegal instructions.
+ */
+	EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
+
+	. = 0x1100
+/*
+ * For the MPC8xx, this is a software tablewalk to load the instruction
+ * TLB.  It is modelled after the example in the Motorola manual.  The task
+ * switch loads the M_TWB register with the pointer to the first level table.
+ * If we discover there is no second level table (value is zero) or if there
+ * is an invalid pte, we load that into the TLB, which causes another fault
+ * into the TLB Error interrupt where we can handle such problems.
+ * We have to use the MD_xxx registers for the tablewalk because the
+ * equivalent MI_xxx registers only perform the attribute functions.
+ */
+InstructionTLBMiss:
+#ifdef CONFIG_8xx_CPU6
+	stw	r3, 8(r0)
+#endif
+	DO_8xx_CPU6(0x3f80, r3)
+	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
+	mfcr	r10
+	stw	r10, 0(r0)
+	stw	r11, 4(r0)
+	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
+	DO_8xx_CPU6(0x3780, r3)
+	mtspr	SPRN_MD_EPN, r10	/* Have to use MD_EPN for walk, MI_EPN can't */
+	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
+	beq	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	rlwimi	r10, r11, 0, 2, 19
+3:
+	lwz	r11, 0(r10)	/* Get the level 1 entry */
+	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
+	beq	2f		/* If zero, don't try to find a pte */
+
+	/* We have a pte table, so load the MI_TWC with the attributes
+	 * for this "segment."
+	 */
+	ori	r11,r11,1		/* Set valid bit */
+	DO_8xx_CPU6(0x2b80, r3)
+	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
+	DO_8xx_CPU6(0x3b80, r3)
+	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
+	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
+	lwz	r10, 0(r11)	/* Get the pte */
+
+	ori	r10, r10, _PAGE_ACCESSED
+	stw	r10, 0(r11)
+
+	/* The Linux PTE won't go exactly into the MMU TLB.
+	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 24, 25, 26, and 27 must be
+	 * set.  All other Linux PTE bits control the behavior
+	 * of the MMU.
+	 */
+2:	li	r11, 0x00f0
+	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	DO_8xx_CPU6(0x2d80, r3)
+	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
+
+	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	lwz	r11, 0(r0)
+	mtcr	r11
+	lwz	r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+	lwz	r3, 8(r0)
+#endif
+	rfi
+
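+/*
+ * The software tablewalk above, roughly in C (illustrative sketch; the
+ * real lookup goes through the MD_EPN/M_TWB/MD_TWC hardware assists):
+ *
+ *	pgdir = (ea & 0x80000000) ? swapper_pg_dir : current_pgdir;
+ *	l1 = pgdir[ea >> 22];			// level 1 entry
+ *	if (l1 & PAGE_MASK)			// have a pte table?
+ *		pte = pte_table(l1)[pte_index(ea)] | _PAGE_ACCESSED;
+ *	else
+ *		pte = 0;	// load an invalid entry; the resulting
+ *				// TLB error does the real fixup
+ *	pte = (pte & ~0x08) | 0xf0;	// adjust the software bits
+ *	mtspr(SPRN_MI_RPN, pte);	// load the TLB entry
+ */
+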
+	. = 0x1200
+DataStoreTLBMiss:
+#ifdef CONFIG_8xx_CPU6
+	stw	r3, 8(r0)
+#endif
+	DO_8xx_CPU6(0x3f80, r3)
+	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
+	mfcr	r10
+	stw	r10, 0(r0)
+	stw	r11, 4(r0)
+	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	andi.	r11, r10, 0x0800
+	beq	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	rlwimi	r10, r11, 0, 2, 19
+3:
+	lwz	r11, 0(r10)	/* Get the level 1 entry */
+	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
+	beq	2f		/* If zero, don't try to find a pte */
+
+	/* We have a pte table, so fetch the pte from the table.
+	 */
+	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
+	DO_8xx_CPU6(0x3b80, r3)
+	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
+	mfspr	r10, SPRN_MD_TWC	/* ....and get the pte address */
+	lwz	r10, 0(r10)	/* Get the pte */
+
+	/* Insert the Guarded flag into the TWC from the Linux PTE.
+	 * It is bit 27 of both the Linux PTE and the TWC (at least
+	 * I got that right :-).  It will be better when we can put
+	 * this into the Linux pgd/pmd and load it in the operation
+	 * above.
+	 */
+	rlwimi	r11, r10, 0, 27, 27
+	DO_8xx_CPU6(0x3b80, r3)
+	mtspr	SPRN_MD_TWC, r11
+
+	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
+	ori	r10, r10, _PAGE_ACCESSED
+	stw	r10, 0(r11)
+
+	/* The Linux PTE won't go exactly into the MMU TLB.
+	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 24, 25, 26, and 27 must be
+	 * set.  All other Linux PTE bits control the behavior
+	 * of the MMU.
+	 */
+2:	li	r11, 0x00f0
+	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	DO_8xx_CPU6(0x3d80, r3)
+	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
+
+	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	lwz	r11, 0(r0)
+	mtcr	r11
+	lwz	r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+	lwz	r3, 8(r0)
+#endif
+	rfi
+
+/* This is an instruction TLB error on the MPC8xx.  This could be due
+ * to many reasons, such as executing guarded memory or illegal instruction
+ * addresses.  There is nothing to do but handle a big time error fault.
+ */
+	. = 0x1300
+InstructionTLBError:
+	b	InstructionAccess
+
+/* This is the data TLB error on the MPC8xx.  This could be due to
+ * many reasons, including a dirty update to a pte.  We can catch that
+ * one here, but anything else is an error.  First, we track down the
+ * Linux pte.  If it is valid, write access is allowed, but the
+ * page dirty bit is not set, we will set it and reload the TLB.  For
+ * any other case, we bail out to a higher level function that can
+ * handle it.
+ */
+	. = 0x1400
+DataTLBError:
+#ifdef CONFIG_8xx_CPU6
+	stw	r3, 8(r0)
+#endif
+	DO_8xx_CPU6(0x3f80, r3)
+	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
+	mfcr	r10
+	stw	r10, 0(r0)
+	stw	r11, 4(r0)
+
+	/* First, make sure this was a store operation.
+	*/
+	mfspr	r10, SPRN_DSISR
+	andis.	r11, r10, 0x0200	/* If set, indicates store op */
+	beq	2f
+
+	/* The EA of a data TLB miss is automatically stored in the MD_EPN
+	 * register.  The EA of a data TLB error is automatically stored in
+	 * the DAR, but not the MD_EPN register.  We must copy the 20 most
+	 * significant bits of the EA from the DAR to MD_EPN before we
+	 * start walking the page tables.  We also need to copy the CASID
+	 * value from the M_CASID register.
+	 * Addendum:  The EA of a data TLB error is _supposed_ to be stored
+	 * in DAR, but it seems that this doesn't happen in some cases, such
+	 * as when the error is due to a dcbi instruction to a page with a
+	 * TLB that doesn't have the changed bit set.  In such cases, there
+	 * does not appear to be any way to recover the EA of the error
+	 * since it is neither in DAR nor MD_EPN.  As a workaround, the
+	 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
+	 * are initialized in mapin_ram().  This will avoid the problem,
+	 * assuming we only use the dcbi instruction on kernel addresses.
+	 */
+	mfspr	r10, SPRN_DAR
+	rlwinm	r11, r10, 0, 0, 19
+	ori	r11, r11, MD_EVALID
+	mfspr	r10, SPRN_M_CASID
+	rlwimi	r11, r10, 0, 28, 31
+	DO_8xx_CPU6(0x3780, r3)
+	mtspr	SPRN_MD_EPN, r11
+
+	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	andi.	r11, r10, 0x0800
+	beq	3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+	rlwimi	r10, r11, 0, 2, 19
+3:
+	lwz	r11, 0(r10)	/* Get the level 1 entry */
+	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
+	beq	2f		/* If zero, bail */
+
+	/* We have a pte table, so fetch the pte from the table.
+	 */
+	ori	r11, r11, 1		/* Set valid bit in physical L2 page */
+	DO_8xx_CPU6(0x3b80, r3)
+	mtspr	SPRN_MD_TWC, r11		/* Load pte table base address */
+	mfspr	r11, SPRN_MD_TWC		/* ....and get the pte address */
+	lwz	r10, 0(r11)		/* Get the pte */
+
+	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
+	beq	2f			/* Bail out if not */
+
+	/* Update 'changed', among others.
+	*/
+	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+	mfspr	r11, SPRN_MD_TWC		/* Get pte address again */
+	stw	r10, 0(r11)		/* and update pte in table */
+
+	/* The Linux PTE won't go exactly into the MMU TLB.
+	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 24, 25, 26, and 27 must be
+	 * set.  All other Linux PTE bits control the behavior
+	 * of the MMU.
+	 */
+	li	r11, 0x00f0
+	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	DO_8xx_CPU6(0x3d80, r3)
+	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
+
+	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	lwz	r11, 0(r0)
+	mtcr	r11
+	lwz	r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+	lwz	r3, 8(r0)
+#endif
+	rfi
+2:
+	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	lwz	r11, 0(r0)
+	mtcr	r11
+	lwz	r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+	lwz	r3, 8(r0)
+#endif
+	b	DataAccess
+
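+/*
+ * The fast dirty-bit path above, in C (sketch; fixup_bits() stands in
+ * for the software-bit adjustment, as in the TLB miss handlers):
+ *
+ *	if ((dsisr & 0x02000000) && l1_valid(ea) &&	// store fault and
+ *	    (*ptep & _PAGE_RW)) {			// pte is writable:
+ *		*ptep |= _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
+ *		mtspr(SPRN_MD_RPN, fixup_bits(*ptep));	// reload the TLB
+ *	} else {
+ *		goto DataAccess;	// real fault: handle_page_fault()
+ *	}
+ */
+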
+	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
+
+/* On the MPC8xx, these next four traps are used for development
+ * support of breakpoints and such.  Someday I will get around to
+ * using them.
+ */
+	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
+	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+
+	. = 0x2000
+
+	.globl	giveup_fpu
+giveup_fpu:
+	blr
+
+/*
+ * This is where the main kernel code starts.
+ */
+start_here:
+	/* ptr to current */
+	lis	r2,init_task@h
+	ori	r2,r2,init_task@l
+
+	/* ptr to phys current thread */
+	tophys(r4,r2)
+	addi	r4,r4,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+	li	r3,0
+	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */
+
+	/* stack */
+	lis	r1,init_thread_union@ha
+	addi	r1,r1,init_thread_union@l
+	li	r0,0
+	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+	bl	early_init	/* We have to do this with MMU on */
+
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+	bl	machine_init
+	bl	MMU_init
+
+/*
+ * Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the 8xx, all we have to do is invalidate the TLB to clear
+ * the old 8M byte TLB mappings and load the page table base register.
+ */
+	/* The right way to do this would be to track it down through
+	 * init's THREAD like the context switch code does, but this is
+	 * easier... until someone changes init's static structures.
+	 */
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
+	tophys(r6,r6)
+#ifdef CONFIG_8xx_CPU6
+	lis	r4, cpu6_errata_word@h
+	ori	r4, r4, cpu6_errata_word@l
+	li	r3, 0x3980
+	stw	r3, 12(r4)
+	lwz	r3, 12(r4)
+#endif
+	mtspr	SPRN_M_TWB, r6
+	lis	r4,2f@h
+	ori	r4,r4,2f@l
+	tophys(r4,r4)
+	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	rfi
+/* Load up the kernel context */
+2:
+	SYNC			/* Force all PTE updates to finish */
+	tlbia			/* Clear all TLB entries */
+	sync			/* wait for tlbia/tlbie to finish */
+	TLBSYNC			/* ... on all CPUs */
+
+	/* set up the PTE pointers for the Abatron bdiGDB.
+	*/
+	tovirt(r6,r6)
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
+	tophys(r5,r5)
+	stw	r6, 0(r5)
+
+/* Now turn on the MMU for real! */
+	li	r4,MSR_KERNEL
+	lis	r3,start_kernel@h
+	ori	r3,r3,start_kernel@l
+	mtspr	SPRN_SRR0,r3
+	mtspr	SPRN_SRR1,r4
+	rfi			/* enable MMU and jump to start_kernel */
+
+/* Set up the initial MMU state so we can do the first level of
+ * kernel initialization.  This maps the first 8 MBytes of memory 1:1
+ * virtual to physical.  Also, set the cache mode since that is defined
+ * by TLB entries and perform any additional mapping (like of the IMMR).
+ * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
+ * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
+ * these mappings is mapped by page tables.
+ */
+initial_mmu:
+	tlbia			/* Invalidate all TLB entries */
+#ifdef CONFIG_PIN_TLB
+	lis	r8, MI_RSV4I@h
+	ori	r8, r8, 0x1c00
+#else
+	li	r8, 0
+#endif
+	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
+
+#ifdef CONFIG_PIN_TLB
+	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
+	ori	r10, r10, 0x1c00
+	mr	r8, r10
+#else
+	lis	r10, MD_RESETVAL@h
+#endif
+#ifndef CONFIG_8xx_COPYBACK
+	oris	r10, r10, MD_WTDEF@h
+#endif
+	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */
+
+	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
+	 * we can load the instruction and data TLB registers with the
+	 * same values.
+	 */
+	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
+	ori	r8, r8, MI_EVALID	/* Mark it valid */
+	mtspr	SPRN_MI_EPN, r8
+	mtspr	SPRN_MD_EPN, r8
+	li	r8, MI_PS8MEG		/* Set 8M byte page */
+	ori	r8, r8, MI_SVALID	/* Make it valid */
+	mtspr	SPRN_MI_TWC, r8
+	mtspr	SPRN_MD_TWC, r8
+	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
+	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
+	mtspr	SPRN_MD_RPN, r8
+	lis	r8, MI_Kp@h		/* Set the protection mode */
+	mtspr	SPRN_MI_AP, r8
+	mtspr	SPRN_MD_AP, r8
+
+	/* Map another 8 MByte at the IMMR to get the processor
+	 * internal registers (among other things).
+	 */
+#ifdef CONFIG_PIN_TLB
+	addi	r10, r10, 0x0100
+	mtspr	SPRN_MD_CTR, r10
+#endif
+	mfspr	r9, 638			/* Get current IMMR */
+	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */
+
+	mr	r8, r9			/* Create vaddr for TLB */
+	ori	r8, r8, MD_EVALID	/* Mark it valid */
+	mtspr	SPRN_MD_EPN, r8
+	li	r8, MD_PS8MEG		/* Set 8M byte page */
+	ori	r8, r8, MD_SVALID	/* Make it valid */
+	mtspr	SPRN_MD_TWC, r8
+	mr	r8, r9			/* Create paddr for TLB */
+	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
+	mtspr	SPRN_MD_RPN, r8
+
+#ifdef CONFIG_PIN_TLB
+	/* Map two more 8M kernel data pages.
+	*/
+	addi	r10, r10, 0x0100
+	mtspr	SPRN_MD_CTR, r10
+
+	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
+	addis	r8, r8, 0x0080		/* Add 8M */
+	ori	r8, r8, MI_EVALID	/* Mark it valid */
+	mtspr	SPRN_MD_EPN, r8
+	li	r9, MI_PS8MEG		/* Set 8M byte page */
+	ori	r9, r9, MI_SVALID	/* Make it valid */
+	mtspr	SPRN_MD_TWC, r9
+	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
+	addis	r11, r11, 0x0080	/* Add 8M */
+	mtspr	SPRN_MD_RPN, r11
+
+	addis	r8, r8, 0x0080		/* Add 8M */
+	mtspr	SPRN_MD_EPN, r8
+	mtspr	SPRN_MD_TWC, r9
+	addis	r11, r11, 0x0080	/* Add 8M */
+	mtspr	SPRN_MD_RPN, r11
+#endif
+
+	/* Since the cache is enabled according to the information we
+	 * just loaded into the TLB, invalidate and enable the caches here.
+	 * We should probably check/set other modes....later.
+	 */
+	lis	r8, IDC_INVALL@h
+	mtspr	SPRN_IC_CST, r8
+	mtspr	SPRN_DC_CST, r8
+	lis	r8, IDC_ENABLE@h
+	mtspr	SPRN_IC_CST, r8
+#ifdef CONFIG_8xx_COPYBACK
+	mtspr	SPRN_DC_CST, r8
+#else
+	/* For a debug option, I left this here to easily enable
+	 * the write through cache mode
+	 */
+	lis	r8, DC_SFWT@h
+	mtspr	SPRN_DC_CST, r8
+	lis	r8, IDC_ENABLE@h
+	mtspr	SPRN_DC_CST, r8
+#endif
+	blr
+
+
+/*
+ * Set up to use a given MMU context.
+ * r3 is context number, r4 is PGD pointer.
+ *
+ * We place the physical address of the new task's page directory
+ * into the MMU base register, and set the ASID compare register with
+ * the new "context."
+ */
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is passed as second argument.
+	 */
+	lis	r5, KERNELBASE@h
+	lwz	r5, 0xf0(r5)
+	stw	r4, 0x4(r5)
+#endif
+
+#ifdef CONFIG_8xx_CPU6
+	lis	r6, cpu6_errata_word@h
+	ori	r6, r6, cpu6_errata_word@l
+	tophys	(r4, r4)
+	li	r7, 0x3980
+	stw	r7, 12(r6)
+	lwz	r7, 12(r6)
+        mtspr   SPRN_M_TWB, r4               /* Update MMU base address */
+	li	r7, 0x3380
+	stw	r7, 12(r6)
+	lwz	r7, 12(r6)
+        mtspr   SPRN_M_CASID, r3             /* Update context */
+#else
+        mtspr   SPRN_M_CASID,r3		/* Update context */
+	tophys	(r4, r4)
+	mtspr	SPRN_M_TWB, r4		/* and pgd */
+#endif
+	SYNC
+	blr
+
+#ifdef CONFIG_8xx_CPU6
+/* It's here because it is unique to the 8xx.
+ * It is important we get called with interrupts disabled.  I used to
+ * do that, but it appears that all code that calls this already had
+ * interrupt disabled.
+ */
+	.globl	set_dec_cpu6
+set_dec_cpu6:
+	lis	r7, cpu6_errata_word@h
+	ori	r7, r7, cpu6_errata_word@l
+	li	r4, 0x2c00
+	stw	r4, 8(r7)
+	lwz	r4, 8(r7)
+        mtspr   22, r3		/* Update Decrementer */
+	SYNC
+	blr
+#endif
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
+	.data
+	.globl	sdata
+sdata:
+	.globl	empty_zero_page
+empty_zero_page:
+	.space	4096
+
+	.globl	swapper_pg_dir
+swapper_pg_dir:
+	.space	4096
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap.
+ * Used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+	.globl	cmd_line
+cmd_line:
+	.space	512
+
+/* Room for two PTE table pointers, usually the kernel and current user
+ * pointers to their respective root page tables (pgdir).
+ */
+abatron_pteptrs:
+	.space	8
+
+#ifdef CONFIG_8xx_CPU6
+	.globl	cpu6_errata_word
+cpu6_errata_word:
+	.space	16
+#endif
+
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644
index 0000000..eba5a5f
--- /dev/null
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -0,0 +1,1058 @@
+/*
+ * arch/ppc/kernel/head_fsl_booke.S
+ *
+ * Kernel execution entry point code.
+ *
+ *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ *      Initial PowerPC version.
+ *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *      Rewritten for PReP
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Low-level exception handers, MMU support, and rewrite.
+ *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *      PowerPC 8xx modifications.
+ *    Copyright (c) 1998-1999 TiVo, Inc.
+ *      PowerPC 403GCX modifications.
+ *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ *      PowerPC 403GCX/405GP modifications.
+ *    Copyright 2000 MontaVista Software Inc.
+ *	PPC405 modifications
+ *      PowerPC 403GCX/405GP modifications.
+ * 	Author: MontaVista Software, Inc.
+ *         	frank_rowand@mvista.com or source@mvista.com
+ * 	   	debbie_chu@mvista.com
+ *    Copyright 2002-2004 MontaVista Software, Inc.
+ *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2004 Freescale Semiconductor, Inc
+ *      PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include "head_booke.h"
+
+/* As with the other PowerPC ports, it is expected that when code
+ * execution begins here, the following registers contain valid, yet
+ * optional, information:
+ *
+ *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
+ *   r4 - Starting address of the init RAM disk
+ *   r5 - Ending address of the init RAM disk
+ *   r6 - Start of kernel command line string (e.g. "mem=128")
+ *   r7 - End of kernel command line string
+ *
+ */
+	.text
+_GLOBAL(_stext)
+_GLOBAL(_start)
+	/*
+	 * Reserve a word at a fixed location to store the address
+	 * of abatron_pteptrs
+	 */
+	nop
+/*
+ * Save parameters we are passed
+ */
+	mr	r31,r3
+	mr	r30,r4
+	mr	r29,r5
+	mr	r28,r6
+	mr	r27,r7
+	li	r24,0		/* CPU number */
+
+/* We try not to make any assumptions about how the boot loader
+ * set up or used the TLBs.  We invalidate all mappings from the
+ * boot loader and load a single entry in TLB1[0] to map the
+ * first 16M of kernel memory.  Any boot info passed from the
+ * bootloader needs to live in this first 16M.
+ *
+ * Requirement on bootloader:
+ *  - The page we're executing in needs to reside in TLB1 and
+ *    have IPROT=1.  If not, an invalidate broadcast could
+ *    evict the entry we're currently executing in.
+ *
+ *  r3 = Index of the TLB1 entry we're executing in
+ *  r4 = Current MSR[IS]
+ *  r5 = Index of TLB1 temp mapping
+ *
+ * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
+ * if needed
+ */
+
+/* 1. Find the index of the entry we're executing in */
+	bl	invstr				/* Find our address */
+invstr:	mflr	r6				/* Make it accessible */
+	mfmsr	r7
+	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
+	mfspr	r7, SPRN_PID0
+	slwi	r7,r7,16
+	or	r7,r7,r4
+	mtspr	SPRN_MAS6,r7
+	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
+#ifndef CONFIG_E200
+	mfspr	r7,SPRN_MAS1
+	andis.	r7,r7,MAS1_VALID@h
+	bne	match_TLB
+	mfspr	r7,SPRN_PID1
+	slwi	r7,r7,16
+	or	r7,r7,r4
+	mtspr	SPRN_MAS6,r7
+	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
+	mfspr	r7,SPRN_MAS1
+	andis.	r7,r7,MAS1_VALID@h
+	bne	match_TLB
+	mfspr	r7, SPRN_PID2
+	slwi	r7,r7,16
+	or	r7,r7,r4
+	mtspr	SPRN_MAS6,r7
+	tlbsx	0,r6				/* Fall through, we had to match */
+#endif
+match_TLB:
+	mfspr	r7,SPRN_MAS0
+	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */
+
+	mfspr	r7,SPRN_MAS1			/* Ensure IPROT is set */
+	oris	r7,r7,MAS1_IPROT@h
+	mtspr	SPRN_MAS1,r7
+	tlbwe
+
+/* 2. Invalidate all entries except the entry we're executing in */
+	mfspr	r9,SPRN_TLB1CFG
+	andi.	r9,r9,0xfff
+	li	r6,0				/* Set Entry counter to 0 */
+1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
+	mtspr	SPRN_MAS0,r7
+	tlbre
+	mfspr	r7,SPRN_MAS1
+	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
+	cmpw	r3,r6
+	beq	skpinv				/* Don't update the current execution TLB */
+	mtspr	SPRN_MAS1,r7
+	tlbwe
+	isync
+skpinv:	addi	r6,r6,1				/* Increment */
+	cmpw	r6,r9				/* Are we done? */
+	bne	1b				/* If not, repeat */
+
+	/* Invalidate TLB0 */
+	li      r6,0x04
+	tlbivax 0,r6
+#ifdef CONFIG_SMP
+	tlbsync
+#endif
+	/* Invalidate TLB1 */
+	li      r6,0x0c
+	tlbivax 0,r6
+#ifdef CONFIG_SMP
+	tlbsync
+#endif
+	msync
+
+/* 3. Setup a temp mapping and jump to it */
+	andi.	r5, r3, 0x1	/* Pick an unused, non-zero entry */
+	addi	r5, r5, 0x1
+	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
+	mtspr	SPRN_MAS0,r7
+	tlbre
+
+	/* Just modify the entry ID and EPN for the temp mapping */
+	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
+	mtspr	SPRN_MAS0,r7
+	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
+	slwi	r6,r6,12
+	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
+	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
+	mtspr	SPRN_MAS1,r6
+	mfspr	r6,SPRN_MAS2
+	li	r7,0		/* temp EPN = 0 */
+	rlwimi	r7,r6,0,20,31
+	mtspr	SPRN_MAS2,r7
+	tlbwe
+
+	xori	r6,r4,1
+	slwi	r6,r6,5		/* setup new context with other address space */
+	bl	1f		/* Find our address */
+1:	mflr	r9
+	rlwimi	r7,r9,0,20,31
+	addi	r7,r7,24
+	mtspr	SPRN_SRR0,r7
+	mtspr	SPRN_SRR1,r6
+	rfi
+
+/* 4. Clear out PIDs & Search info */
+	li	r6,0
+	mtspr	SPRN_PID0,r6
+#ifndef CONFIG_E200
+	mtspr	SPRN_PID1,r6
+	mtspr	SPRN_PID2,r6
+#endif
+	mtspr	SPRN_MAS6,r6
+
+/* 5. Invalidate mapping we started in */
+	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
+	mtspr	SPRN_MAS0,r7
+	tlbre
+	li	r6,0
+	mtspr	SPRN_MAS1,r6
+	tlbwe
+	/* Invalidate TLB1 */
+	li      r9,0x0c
+	tlbivax 0,r9
+#ifdef CONFIG_SMP
+	tlbsync
+#endif
+	msync
+
+/* 6. Setup KERNELBASE mapping in TLB1[0] */
+	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
+	mtspr	SPRN_MAS0,r6
+	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
+	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
+	mtspr	SPRN_MAS1,r6
+	li	r7,0
+	lis	r6,KERNELBASE@h
+	ori	r6,r6,KERNELBASE@l
+	rlwimi	r6,r7,0,20,31
+	mtspr	SPRN_MAS2,r6
+	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
+	mtspr	SPRN_MAS3,r7
+	tlbwe
+
+/* 7. Jump to KERNELBASE mapping */
+	lis	r7,MSR_KERNEL@h
+	ori	r7,r7,MSR_KERNEL@l
+	bl	1f			/* Find our address */
+1:	mflr	r9
+	rlwimi	r6,r9,0,20,31
+	addi	r6,r6,24
+	mtspr	SPRN_SRR0,r6
+	mtspr	SPRN_SRR1,r7
+	rfi				/* start execution out of TLB1[0] entry */
+
+/* 8. Clear out the temp mapping */
+	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
+	mtspr	SPRN_MAS0,r7
+	tlbre
+	li	r8,0		/* Clear MAS1 to invalidate the temp entry */
+	mtspr	SPRN_MAS1,r8
+	tlbwe
+	/* Invalidate TLB1 */
+	li      r9,0x0c
+	tlbivax 0,r9
+#ifdef CONFIG_SMP
+	tlbsync
+#endif
+	msync
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheck);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError);
+	SET_IVOR(14, InstructionTLBError);
+	SET_IVOR(15, Debug);
+	SET_IVOR(32, SPEUnavailable);
+	SET_IVOR(33, SPEFloatingPointData);
+	SET_IVOR(34, SPEFloatingPointRound);
+#ifndef CONFIG_E200
+	SET_IVOR(35, PerformanceMonitor);
+#endif
+
+	/* Establish the interrupt vector base */
+	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+	mtspr	SPRN_IVPR,r4
+
+	/* Setup the defaults for TLB entries */
+	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+#ifdef CONFIG_E200
+	oris	r2,r2,MAS4_TLBSELD(1)@h
+#endif
+   	mtspr	SPRN_MAS4, r2
+
+#if 0
+	/* Enable DOZE */
+	mfspr	r2,SPRN_HID0
+	oris	r2,r2,HID0_DOZE@h
+	mtspr	SPRN_HID0, r2
+#endif
+#ifdef CONFIG_E200
+	/* enable dedicated debug exception handling resources (Debug APU) */
+	mfspr	r2,SPRN_HID0
+	ori 	r2,r2,HID0_DAPUEN@l
+	mtspr	SPRN_HID0,r2
+#endif
+
+#if !defined(CONFIG_BDI_SWITCH)
+	/*
+	 * The Abatron BDI JTAG debugger does not tolerate others
+	 * mucking with the debug registers.
+	 */
+	lis	r2,DBCR0_IDM@h
+	mtspr	SPRN_DBCR0,r2
+	/* clear any residual debug events */
+	li	r2,-1
+	mtspr	SPRN_DBSR,r2
+#endif
+
+	/*
+	 * This is where the main kernel code starts.
+	 */
+
+	/* ptr to current */
+	lis	r2,init_task@h
+	ori	r2,r2,init_task@l
+
+	/* ptr to current thread */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	/* stack */
+	lis	r1,init_thread_union@h
+	ori	r1,r1,init_thread_union@l
+	li	r0,0
+	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
+
+	bl	early_init
+
+	mfspr	r3,SPRN_TLB1CFG
+	andi.	r3,r3,0xfff
+	lis	r4,num_tlbcam_entries@ha
+	stw	r3,num_tlbcam_entries@l(r4)
+/*
+ * Decide what sort of machine this is and initialize the MMU.
+ */
+	mr	r3,r31
+	mr	r4,r30
+	mr	r5,r29
+	mr	r6,r28
+	mr	r7,r27
+	bl	machine_init
+	bl	MMU_init
+
+	/* Setup PTE pointers for the Abatron bdiGDB */
+	lis	r6, swapper_pg_dir@h
+	ori	r6, r6, swapper_pg_dir@l
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	lis	r4, KERNELBASE@h
+	ori	r4, r4, KERNELBASE@l
+	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
+	stw	r6, 0(r5)
+
+	/* Let's move on */
+	lis	r4,start_kernel@h
+	ori	r4,r4,start_kernel@l
+	lis	r3,MSR_KERNEL@h
+	ori	r3,r3,MSR_KERNEL@l
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r3
+	rfi			/* change context and jump to start_kernel */
+
+/* Macros to hide the PTE size differences
+ *
+ * FIND_PTE -- walks the page tables given EA & pgdir pointer
+ *   r10 -- EA of fault
+ *   r11 -- PGDIR pointer
+ *   r12 -- free
+ *   label 2: is the bailout case
+ *
+ * if we find the pte (fall through):
+ *   r11 is low pte word
+ *   r12 is pointer to the pte
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4
+#define FIND_PTE	\
+	rlwinm 	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
+	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
+	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
+	beq	2f;			/* Bail if no table */		\
+	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
+	lwz	r11, 4(r12);		/* Get pte entry */
+#else
+#define PTE_FLAGS_OFFSET	0
+#define FIND_PTE	\
+	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
+	lwz	r11, 0(r11);		/* Get L1 entry */			\
+	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
+	beq	2f;			/* Bail if no table */			\
+	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
+	lwz	r11, 0(r12);		/* Get Linux PTE */
+#endif
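+
+/* For illustration, the 32-bit FIND_PTE above is roughly the following
+ * C walk (a sketch only; ea and pgdir mirror the r10/r11 roles, and
+ * none of these names are defined in this file):
+ *
+ *	pgd = pgdir + (ea >> 22);		// top 10 EA bits index the pgd
+ *	pt  = (u32 *)(*pgd & ~0xfff);		// L2 table base from the L1 entry
+ *	if (pt == NULL)
+ *		goto bail;			// the "2:" label in the macro
+ *	pte = pt + ((ea >> 12) & 0x3ff);	// next 10 EA bits index the pte
+ *	val = *pte;				// r11 ends up with the Linux PTE
+ */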
+
+/*
+ * Interrupt vector entry code
+ *
+ * The Book E MMUs are always on so we don't need to handle
+ * interrupts in real mode as with previous PPC processors. In
+ * this case we handle interrupts in the kernel virtual address
+ * space.
+ *
+ * Interrupt vectors are dynamically placed relative to the
+ * interrupt prefix as determined by the address of interrupt_base.
+ * The interrupt vector offsets are programmed using the labels
+ * for each interrupt vector entry.
+ *
+ * Interrupt vectors must be aligned on a 16 byte boundary.
+ * We align on a 32 byte cache line boundary for good measure.
+ */
+
+interrupt_base:
+	/* Critical Input Interrupt */
+	CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+
+	/* Machine Check Interrupt */
+#ifdef CONFIG_E200
+	/* no RFMCI, MCSRRs on E200 */
+	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+#else
+	MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+#endif
+
+	/* Data Storage Interrupt */
+	START_EXCEPTION(DataStorage)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+
+	/*
+	 * Check if it was a store fault; if not, bail
+	 * because a user tried to access a kernel or
+	 * read-protected page.  Otherwise, get the
+	 * offending address and handle it.
+	 */
+	mfspr	r10, SPRN_ESR
+	andis.	r10, r10, ESR_ST@h
+	beq	2f
+
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	ori	r11, r11, TASK_SIZE@l
+	cmplw	0, r10, r11
+	bge	2f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+4:
+	FIND_PTE
+
+	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
+	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
+	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
+	bne	2f			/* Bail if not */
+
+	/* Update 'changed'. */
+	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+	stw	r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
+
+	/* MAS2 is not updated as the entry already exists in the TLB;
+	 * this fault is taken to detect a state transition
+	 * (e.g. COW -> DIRTY).
+	 */
+	andi.	r11, r11, _PAGE_HWEXEC
+	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
+	ori     r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
+
+	/* update search PID in MAS6, AS = 0 */
+	mfspr	r12, SPRN_PID0
+	slwi	r12, r12, 16
+	mtspr	SPRN_MAS6, r12
+
+	/* find the TLB index that caused the fault.  It has to be here. */
+	tlbsx	0, r10
+
+	/* only update the perm bits, assume the RPN is fine */
+	mfspr	r12, SPRN_MAS3
+	rlwimi	r12, r11, 0, 20, 31
+	mtspr	SPRN_MAS3,r12
+	tlbwe
+
+	/* Done...restore registers and get out of here.  */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	rfi			/* Force context change */
+
+2:
+	/*
+	 * The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	data_access
+
+	/* Instruction Storage Interrupt */
+	INSTRUCTION_STORAGE_EXCEPTION
+
+	/* External Input Interrupt */
+	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+
+	/* Alignment Interrupt */
+	ALIGNMENT_EXCEPTION
+
+	/* Program Interrupt */
+	PROGRAM_EXCEPTION
+
+	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
+#ifdef CONFIG_E200
+	/* E200 treats 'normal' floating point instructions as an FP Unavailable exception */
+	EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE)
+#else
+	EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
+#endif
+
+	/* System Call Interrupt */
+	START_EXCEPTION(SystemCall)
+	NORMAL_EXCEPTION_PROLOG
+	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+
+	/* Auxiliary Processor Unavailable Interrupt */
+	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+
+	/* Decrementer Interrupt */
+	DECREMENTER_EXCEPTION
+
+	/* Fixed Interval Timer Interrupt */
+	/* TODO: Add FIT support */
+	EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+
+	/* Watchdog Timer Interrupt */
+#ifdef CONFIG_BOOKE_WDT
+	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+#else
+	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException)
+#endif
+
+	/* Data TLB Error Interrupt */
+	START_EXCEPTION(DataTLBError)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+	mfspr	r10, SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	ori	r11, r11, TASK_SIZE@l
+	cmplw	5, r10, r11
+	blt	5, 3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+
+	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
+	rlwinm	r12,r12,0,16,1
+	mtspr	SPRN_MAS1,r12
+
+	b	4f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+
+4:
+	FIND_PTE
+	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+	beq	2f			/* Bail if not present */
+
+#ifdef CONFIG_PTE_64BIT
+	lwz	r13, 0(r12)
+#endif
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, PTE_FLAGS_OFFSET(r12)
+
+	 /* Jump to common tlb load */
+	b	finish_tlb_load
+2:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	data_access
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bail out
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError)
+	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
+	mtspr	SPRN_SPRG1, r11
+	mtspr	SPRN_SPRG4W, r12
+	mtspr	SPRN_SPRG5W, r13
+	mfcr	r11
+	mtspr	SPRN_SPRG7W, r11
+	mfspr	r10, SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11, TASK_SIZE@h
+	ori	r11, r11, TASK_SIZE@l
+	cmplw	5, r10, r11
+	blt	5, 3f
+	lis	r11, swapper_pg_dir@h
+	ori	r11, r11, swapper_pg_dir@l
+
+	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
+	rlwinm	r12,r12,0,16,1
+	mtspr	SPRN_MAS1,r12
+
+	b	4f
+
+	/* Get the PGD for the current thread */
+3:
+	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+
+4:
+	FIND_PTE
+	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
+	beq	2f			/* Bail if not present */
+
+#ifdef CONFIG_PTE_64BIT
+	lwz	r13, 0(r12)
+#endif
+	ori	r11, r11, _PAGE_ACCESSED
+	stw	r11, PTE_FLAGS_OFFSET(r12)
+
+	/* Jump to common TLB load point */
+	b	finish_tlb_load
+
+2:
+	/* The bailout.  Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	b	InstructionStorage
+
+#ifdef CONFIG_SPE
+	/* SPE Unavailable */
+	START_EXCEPTION(SPEUnavailable)
+	NORMAL_EXCEPTION_PROLOG
+	bne	load_up_spe
+	addi    r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x2010, KernelSPE)
+#else
+	EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
+
+	/* SPE Floating Point Data */
+#ifdef CONFIG_SPE
+	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+#else
+	EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
+
+	/* SPE Floating Point Round */
+	EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE)
+
+	/* Performance Monitor */
+	EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD)
+
+
+	/* Debug Interrupt */
+	DEBUG_EXCEPTION
+
+/*
+ * Local functions
+ */
+
+	/*
+	 * Data TLB exceptions will bail out to this point
+	 * if they can't resolve the lightweight TLB fault.
+	 */
+data_access:
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+	stw	r5,_ESR(r11)
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
+	bne	1f
+	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * 	r10 - EA of fault
+ * 	r11 - TLB (info from Linux PTE)
+ * 	r12, r13 - available to use
+ * 	CR5 - results of addr < TASK_SIZE
+ *	MAS0, MAS1 - loaded with proper value when we get here
+ *	MAS2, MAS3 - will need additional info from Linux PTE
+ *	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load:
+	/*
+	 * We set execute because we don't have the granularity to
+	 * properly set this at the page level (a Linux limitation).
+	 * Many of these bits are software only.  Bits we don't set
+	 * here are assumed to already have the appropriate value.
+	 */
+
+	mfspr	r12, SPRN_MAS2
+#ifdef CONFIG_PTE_64BIT
+	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
+#else
+	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
+#endif
+	mtspr	SPRN_MAS2, r12
+
+	bge	5, 1f
+
+	/* is user addr */
+	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
+	srwi	r10, r12, 1
+	or	r12, r12, r10	/* Copy user perms into supervisor */
+	iseleq	r12, 0, r12
+	b	2f
+
+	/* is kernel addr */
+1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
+	ori	r12, r12, (MAS3_SX | MAS3_SR)
+
+#ifdef CONFIG_PTE_64BIT
+2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
+	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
+	mtspr	SPRN_MAS3, r12
+BEGIN_FTR_SECTION
+	srwi	r10, r13, 8		/* grab RPN[8:31] */
+	mtspr	SPRN_MAS7, r10
+END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
+#else
+2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
+	mtspr	SPRN_MAS3, r11
+#endif
+#ifdef CONFIG_E200
+	/* Round-robin TLB1 entry assignment */
+	mfspr	r12, SPRN_MAS0
+
+	/* Extract TLB1CFG(NENTRY) */
+	mfspr	r11, SPRN_TLB1CFG
+	andi.	r11, r11, 0xfff
+
+	/* Extract MAS0(NV) */
+	andi.	r13, r12, 0xfff
+	addi	r13, r13, 1
+	cmpw	0, r13, r11
+	addi	r12, r12, 1
+
+	/* check if we need to wrap */
+	blt	7f
+
+	/* wrap back to first free tlbcam entry */
+	lis	r13, tlbcam_index@ha
+	lwz	r13, tlbcam_index@l(r13)
+	rlwimi	r12, r13, 0, 20, 31
+7:
+	mtspr   SPRN_MAS0,r12
+#endif /* CONFIG_E200 */
+
+	tlbwe
+
+	/* Done...restore registers and get out of here.  */
+	mfspr	r11, SPRN_SPRG7R
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG5R
+	mfspr	r12, SPRN_SPRG4R
+	mfspr	r11, SPRN_SPRG1
+	mfspr	r10, SPRN_SPRG0
+	rfi					/* Force context change */
+
+#ifdef CONFIG_SPE
+/* Note that the SPE support is closely modeled after the AltiVec
+ * support.  Changes to one are likely to be applicable to the
+ * other!  */
+load_up_spe:
+/*
+ * Disable SPE for the task which had SPE previously,
+ * and save its SPE registers in its thread_struct.
+ * Enables SPE for use in the kernel on return.
+ * On SMP we know the SPE units are free, since we give them up on
+ * every switch.  -- Kumar
+ */
+	mfmsr	r5
+	oris	r5,r5,MSR_SPE@h
+	mtmsr	r5			/* enable use of SPE now */
+	isync
+/*
+ * For SMP, we don't do lazy SPE switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_spe in switch_to.
+ */
+#ifndef CONFIG_SMP
+	lis	r3,last_task_used_spe@ha
+	lwz	r4,last_task_used_spe@l(r3)
+	cmpi	0,r4,0
+	beq	1f
+	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
+	SAVE_32EVRS(0,r10,r4)
+   	evxor	evr10, evr10, evr10	/* clear out evr10 */
+	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
+	li	r5,THREAD_ACC
+   	evstddx	evr10, r4, r5		/* save off accumulator */
+	lwz	r5,PT_REGS(r4)
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_SPE@h
+	andc	r4,r4,r10	/* disable SPE for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of SPE after return */
+	oris	r9,r9,MSR_SPE@h
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	li	r4,1
+	li	r10,THREAD_ACC
+	stw	r4,THREAD_USED_SPE(r5)
+	evlddx	evr4,r10,r5
+	evmra	evr4,evr4
+	REST_32EVRS(0,r10,r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	stw	r4,last_task_used_spe@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+2:	REST_4GPRS(3, r11)
+	lwz	r10,_CCR(r11)
+	REST_GPR(1, r11)
+	mtcr	r10
+	lwz	r10,_LINK(r11)
+	mtlr	r10
+	REST_GPR(10, r11)
+	mtspr	SPRN_SRR1,r9
+	mtspr	SPRN_SRR0,r12
+	REST_GPR(9, r11)
+	REST_GPR(12, r11)
+	lwz	r11,GPR11(r11)
+	SYNC
+	rfi
+
+/*
+ * SPE unavailable trap from kernel - print a message, but let
+ * the task use SPE in the kernel until it returns to user mode.
+ */
+KernelSPE:
+	lwz	r3,_MSR(r1)
+	oris	r3,r3,MSR_SPE@h
+	stw	r3,_MSR(r1)	/* enable use of SPE after return */
+	lis	r3,87f@h
+	ori	r3,r3,87f@l
+	mr	r4,r2		/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
+	.align	4,0
+
+#endif /* CONFIG_SPE */
+
+/*
+ * Global functions
+ */
+
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+ */
+_GLOBAL(loadcam_entry)
+	lis	r4,TLBCAM@ha
+	addi	r4,r4,TLBCAM@l
+	mulli	r5,r3,20
+	add	r3,r5,r4
+	lwz	r4,0(r3)
+	mtspr	SPRN_MAS0,r4
+	lwz	r4,4(r3)
+	mtspr	SPRN_MAS1,r4
+	lwz	r4,8(r3)
+	mtspr	SPRN_MAS2,r4
+	lwz	r4,12(r3)
+	mtspr	SPRN_MAS3,r4
+	tlbwe
+	isync
+	blr
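+
+/* Illustrative C view of the table walked above; the 20-byte stride
+ * (mulli r5,r3,20) implies five words per entry, of which MAS0-MAS3
+ * are loaded here (field names assumed, not defined in this file):
+ *
+ *	struct tlbcam {
+ *		u32 MAS0, MAS1, MAS2, MAS3, MAS7;
+ *	} TLBCAM[];
+ *
+ *	loadcam_entry(i);	// programs the MMU from TLBCAM[i]
+ */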
+
+/*
+ * extern void giveup_altivec(struct task_struct *prev)
+ *
+ * The e500 core does not have an AltiVec unit.
+ */
+_GLOBAL(giveup_altivec)
+	blr
+
+#ifdef CONFIG_SPE
+/*
+ * extern void giveup_spe(struct task_struct *prev)
+ */
+_GLOBAL(giveup_spe)
+	mfmsr	r5
+	oris	r5,r5,MSR_SPE@h
+	SYNC
+	mtmsr	r5			/* enable use of SPE now */
+	isync
+	cmpi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpi	0,r5,0
+	SAVE_32EVRS(0, r4, r3)
+   	evxor	evr6, evr6, evr6	/* clear out evr6 */
+	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
+	li	r4,THREAD_ACC
+   	evstddx	evr6, r4, r3		/* save off accumulator */
+	mfspr	r6,SPRN_SPEFSCR
+	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_SPE@h
+	andc	r4,r4,r3		/* disable SPE for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_spe@ha
+	stw	r5,last_task_used_spe@l(r4)
+#endif /* CONFIG_SMP */
+	blr
+#endif /* CONFIG_SPE */
+
+/*
+ * extern void giveup_fpu(struct task_struct *prev)
+ *
+ * Not all FSL Book-E cores have an FPU
+ */
+#ifndef CONFIG_PPC_FPU
+_GLOBAL(giveup_fpu)
+	blr
+#endif
+
+/*
+ * extern void abort(void)
+ *
+ * At present, this routine just applies a system reset.
+ */
+_GLOBAL(abort)
+	li	r13,0
+        mtspr   SPRN_DBCR0,r13		/* disable all debug events */
+	mfmsr	r13
+	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
+	mtmsr	r13
+        mfspr   r13,SPRN_DBCR0
+        lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
+        mtspr   SPRN_DBCR0,r13
+
+_GLOBAL(set_context)
+
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is the second parameter.
+	 */
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r4, 0x4(r5)
+#endif
+	mtspr	SPRN_PID,r3
+	isync			/* Force context change */
+	blr
+
+/*
+ * We put a few things here that have to be page-aligned. This stuff
+ * goes at the beginning of the data segment, which is page-aligned.
+ */
+	.data
+_GLOBAL(sdata)
+_GLOBAL(empty_zero_page)
+	.space	4096
+_GLOBAL(swapper_pg_dir)
+	.space	4096
+
+/* Reserve 4k for the critical exception stack and 4k for the machine
+ * check stack per CPU, for kernel-mode exceptions */
+	.section .bss
+        .align 12
+exception_stack_bottom:
+	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
+_GLOBAL(exception_stack_top)
+
+/*
+ * This space gets a copy of optional info passed to us by the bootstrap
+ * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
+ */
+_GLOBAL(cmd_line)
+	.space	512
+
+/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page tables.
+ */
+abatron_pteptrs:
+	.space	8
+
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644
index 0000000..1a2194c
--- /dev/null
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -0,0 +1,233 @@
+/*
+ *  This file contains the power_save function for 6xx & 7xxx CPUs
+ *  rewritten in assembler
+ *
+ *  Warning! This code assumes that if your machine has a 750fx
+ *  it will have PLL 1 set to low speed mode (used during NAP/DOZE).
+ *  If this is not the case, some additional changes will have to
+ *  be made to check a runtime variable (a bit like powersave-nap).
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#undef DEBUG
+
+	.text
+
+/*
+ * Init idle, called at early CPU setup time from head.S for each CPU.
+ * Make sure no remnant of NAP mode remains in HID0, and save default
+ * values for some CPU-specific registers. Called with r24
+ * containing the CPU number and r3 the reloc offset.
+ */
+_GLOBAL(init_idle_6xx)
+BEGIN_FTR_SECTION
+	mfspr	r4,SPRN_HID0
+	rlwinm	r4,r4,0,10,8	/* Clear NAP */
+	mtspr	SPRN_HID0, r4
+	b	1f
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+	blr
+1:
+	slwi	r5,r24,2
+	add	r5,r5,r3
+BEGIN_FTR_SECTION
+	mfspr	r4,SPRN_MSSCR0
+	addis	r6,r5, nap_save_msscr0@ha
+	stw	r4,nap_save_msscr0@l(r6)
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+BEGIN_FTR_SECTION
+	mfspr	r4,SPRN_HID1
+	addis	r6,r5,nap_save_hid1@ha
+	stw	r4,nap_save_hid1@l(r6)
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+	blr
+
+/*
+ * Here is the power_save_6xx function. This could eventually be
+ * split into several functions, with the function pointer changed
+ * depending on the various features.
+ */
+_GLOBAL(ppc6xx_idle)
+	/* Check if we can nap or doze, put HID0 mask in r3
+	 */
+	lis	r3, 0
+BEGIN_FTR_SECTION
+	lis	r3,HID0_DOZE@h
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+	/* We must dynamically check for the NAP feature as it
+	 * can be cleared by CPU init after the fixups are done
+	 */
+	lis	r4,cur_cpu_spec@ha
+	lwz	r4,cur_cpu_spec@l(r4)
+	lwz	r4,CPU_SPEC_FEATURES(r4)
+	andi.	r0,r4,CPU_FTR_CAN_NAP
+	beq	1f
+	/* Now check if user or arch enabled NAP mode */
+	lis	r4,powersave_nap@ha
+	lwz	r4,powersave_nap@l(r4)
+	cmpwi	0,r4,0
+	beq	1f
+	lis	r3,HID0_NAP@h
+1:	
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+	cmpwi	0,r3,0
+	beqlr
+
+	/* Clear MSR:EE */
+	mfmsr	r7
+	rlwinm	r0,r7,0,17,15
+	mtmsr	r0
+
+	/* Check current_thread_info()->flags */
+	rlwinm	r4,r1,0,0,18
+	lwz	r4,TI_FLAGS(r4)
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	mtmsr	r7	/* out of line this ? */
+	blr
+1:	
+	/* Some pre-nap cleanups needed on some CPUs */
+	andis.	r0,r3,HID0_NAP@h
+	beq	2f
+BEGIN_FTR_SECTION
+	/* Disable L2 prefetch on some 745x and try to ensure the
+	 * L2 prefetch engines are idle. As explained by the errata
+	 * text, we can't be sure they are; we just hope very hard
+	 * that this will be enough. At least I noticed Apple
+	 * doesn't even bother doing the dcbf's here...
+	 */
+	mfspr	r4,SPRN_MSSCR0
+	rlwinm	r4,r4,0,0,29
+	sync
+	mtspr	SPRN_MSSCR0,r4
+	sync
+	isync
+	lis	r4,KERNELBASE@h
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+#ifdef DEBUG
+	lis	r6,nap_enter_count@ha
+	lwz	r4,nap_enter_count@l(r6)
+	addi	r4,r4,1
+	stw	r4,nap_enter_count@l(r6)
+#endif	
+2:
+BEGIN_FTR_SECTION
+	/* Go to low speed mode on some 750FX */
+	lis	r4,powersave_lowspeed@ha
+	lwz	r4,powersave_lowspeed@l(r4)
+	cmpwi	0,r4,0
+	beq	1f
+	mfspr	r4,SPRN_HID1
+	oris	r4,r4,0x0001
+	mtspr	SPRN_HID1,r4
+1:	
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+
+	/* Go to NAP or DOZE now */	
+	mfspr	r4,SPRN_HID0
+	lis	r5,(HID0_NAP|HID0_SLEEP)@h
+BEGIN_FTR_SECTION
+	oris	r5,r5,HID0_DOZE@h
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+	andc	r4,r4,r5
+	or	r4,r4,r3
+BEGIN_FTR_SECTION
+	oris	r4,r4,HID0_DPM@h	/* that should be done once for all  */
+END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
+	mtspr	SPRN_HID0,r4
+BEGIN_FTR_SECTION
+	DSSALL
+	sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	ori	r7,r7,MSR_EE /* Could be omitted (already set) */
+	oris	r7,r7,MSR_POW@h
+	sync
+	isync
+	mtmsr	r7
+	isync
+	sync
+	blr
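+
+/* The entry sequence above boils down to (a sketch):
+ *
+ *	HID0 |= NAP or DOZE;		// choose the power-save mode
+ *	MSR  |= MSR_POW | MSR_EE;	// POW transition requires EE set
+ *	sync; isync; mtmsr; isync; sync;
+ *
+ * The core then stops until the next interrupt, whose entry path is
+ * expected to reach power_save_6xx_restore below.
+ */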
+	
+/*
+ * Return from NAP/DOZE mode and restore some CPU-specific registers.
+ * We are called with DR/IR still off and r2 containing the physical
+ * address of current.
+ */
+_GLOBAL(power_save_6xx_restore)
+	mfspr	r11,SPRN_HID0
+	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
+	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
+BEGIN_FTR_SECTION
+	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+	mtspr	SPRN_HID0, r11
+
+#ifdef DEBUG
+	beq	cr1,1f
+	lis	r11,(nap_return_count-KERNELBASE)@ha
+	lwz	r9,nap_return_count@l(r11)
+	addi	r9,r9,1
+	stw	r9,nap_return_count@l(r11)
+1:
+#endif
+	
+	rlwinm	r9,r1,0,0,18
+	tophys(r9,r9)
+	lwz	r11,TI_CPU(r9)
+	slwi	r11,r11,2
+	/* TODO: make sure all these are in the same page
+	 * and load r22 (@ha part + CPU offset) only once
+	 */
+BEGIN_FTR_SECTION
+	beq	cr1,1f
+	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
+	lwz	r9,nap_save_msscr0@l(r9)
+	mtspr	SPRN_MSSCR0, r9
+	sync
+	isync
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
+BEGIN_FTR_SECTION
+	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
+	lwz	r9,nap_save_hid1@l(r9)
+	mtspr	SPRN_HID1, r9
+END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
+	b	transfer_to_handler_cont
+
+	.data
+
+_GLOBAL(nap_save_msscr0)
+	.space	4*NR_CPUS
+
+_GLOBAL(nap_save_hid1)
+	.space	4*NR_CPUS
+
+_GLOBAL(powersave_nap)
+	.long	0
+_GLOBAL(powersave_lowspeed)
+	.long	0
+
+#ifdef DEBUG
+_GLOBAL(nap_enter_count)
+	.space	4
+_GLOBAL(nap_return_count)
+	.space	4
+#endif
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644
index 0000000..7bfa0f0
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -0,0 +1,338 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/elfcore.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/nvram.h>
+#include <linux/console.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+
+#include <asm/page.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/ide.h>
+#include <asm/atomic.h>
+#include <asm/checksum.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/pci-bridge.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/nvram.h>
+#include <asm/mmu_context.h>
+#include <asm/backlight.h>
+#include <asm/time.h>
+#include <asm/cputable.h>
+#include <asm/btext.h>
+#include <asm/div64.h>
+#include <asm/xmon.h>
+
+#ifdef  CONFIG_8xx
+#include <asm/commproc.h>
+#endif
+
+extern void transfer_to_handler(void);
+extern void do_IRQ(struct pt_regs *regs);
+extern void MachineCheckException(struct pt_regs *regs);
+extern void AlignmentException(struct pt_regs *regs);
+extern void ProgramCheckException(struct pt_regs *regs);
+extern void SingleStepException(struct pt_regs *regs);
+extern int do_signal(sigset_t *, struct pt_regs *);
+extern int pmac_newworld;
+extern int sys_sigreturn(struct pt_regs *regs);
+
+long long __ashrdi3(long long, int);
+long long __ashldi3(long long, int);
+long long __lshrdi3(long long, int);
+
+extern unsigned long mm_ptov (unsigned long paddr);
+
+EXPORT_SYMBOL(clear_pages);
+EXPORT_SYMBOL(clear_user_page);
+EXPORT_SYMBOL(do_signal);
+EXPORT_SYMBOL(transfer_to_handler);
+EXPORT_SYMBOL(do_IRQ);
+EXPORT_SYMBOL(MachineCheckException);
+EXPORT_SYMBOL(AlignmentException);
+EXPORT_SYMBOL(ProgramCheckException);
+EXPORT_SYMBOL(SingleStepException);
+EXPORT_SYMBOL(sys_sigreturn);
+EXPORT_SYMBOL(ppc_n_lost_interrupts);
+EXPORT_SYMBOL(ppc_lost_interrupts);
+
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+#if defined(CONFIG_PPC_PREP)
+EXPORT_SYMBOL(_prep_type);
+EXPORT_SYMBOL(ucSystemType);
+#endif
+
+#if !defined(__INLINE_BITOPS)
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+#endif /* __INLINE_BITOPS */
+
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strcasecmp);
+EXPORT_SYMBOL(__div64_32);
+
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_tcpudp_magic);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+EXPORT_SYMBOL(_insb);
+EXPORT_SYMBOL(_outsb);
+EXPORT_SYMBOL(_insw);
+EXPORT_SYMBOL(_outsw);
+EXPORT_SYMBOL(_insl);
+EXPORT_SYMBOL(_outsl);
+EXPORT_SYMBOL(_insw_ns);
+EXPORT_SYMBOL(_outsw_ns);
+EXPORT_SYMBOL(_insl_ns);
+EXPORT_SYMBOL(_outsl_ns);
+EXPORT_SYMBOL(iopa);
+EXPORT_SYMBOL(mm_ptov);
+EXPORT_SYMBOL(ioremap);
+#ifdef CONFIG_44x
+EXPORT_SYMBOL(ioremap64);
+#endif
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+EXPORT_SYMBOL(ppc_ide_md);
+#endif
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(isa_mem_base);
+EXPORT_SYMBOL(pci_dram_offset);
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_bus_io_base);
+EXPORT_SYMBOL(pci_bus_io_base_phys);
+EXPORT_SYMBOL(pci_bus_mem_base_phys);
+EXPORT_SYMBOL(pci_bus_to_hose);
+EXPORT_SYMBOL(pci_resource_to_bus);
+EXPORT_SYMBOL(pci_phys_to_bus);
+EXPORT_SYMBOL(pci_bus_to_phys);
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+EXPORT_SYMBOL(flush_dcache_all);
+#endif
+
+EXPORT_SYMBOL(start_thread);
+EXPORT_SYMBOL(kernel_thread);
+
+EXPORT_SYMBOL(flush_instruction_cache);
+EXPORT_SYMBOL(giveup_fpu);
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__flush_icache_range);
+#else
+EXPORT_SYMBOL(flush_icache_range);
+#endif
+EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_user_range);
+EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(_tlbie);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL(last_task_used_altivec);
+EXPORT_SYMBOL(giveup_altivec);
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+EXPORT_SYMBOL(last_task_used_spe);
+EXPORT_SYMBOL(giveup_spe);
+#endif /* CONFIG_SPE */
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_hw_index);
+#endif
+
+EXPORT_SYMBOL(ppc_md);
+
+#ifdef CONFIG_ADB
+EXPORT_SYMBOL(adb_request);
+EXPORT_SYMBOL(adb_register);
+EXPORT_SYMBOL(adb_unregister);
+EXPORT_SYMBOL(adb_poll);
+EXPORT_SYMBOL(adb_try_handler_change);
+#endif /* CONFIG_ADB */
+#ifdef CONFIG_ADB_CUDA
+EXPORT_SYMBOL(cuda_request);
+EXPORT_SYMBOL(cuda_poll);
+#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_PPC_MULTIPLATFORM
+EXPORT_SYMBOL(_machine);
+#endif
+#ifdef CONFIG_PPC_PMAC
+EXPORT_SYMBOL(sys_ctrler);
+EXPORT_SYMBOL(pmac_newworld);
+#endif
+#ifdef CONFIG_PPC_OF
+EXPORT_SYMBOL(find_devices);
+EXPORT_SYMBOL(find_type_devices);
+EXPORT_SYMBOL(find_compatible_devices);
+EXPORT_SYMBOL(find_path_device);
+EXPORT_SYMBOL(device_is_compatible);
+EXPORT_SYMBOL(machine_is_compatible);
+EXPORT_SYMBOL(find_all_nodes);
+EXPORT_SYMBOL(get_property);
+EXPORT_SYMBOL(request_OF_resource);
+EXPORT_SYMBOL(release_OF_resource);
+EXPORT_SYMBOL(pci_busdev_to_OF_node);
+EXPORT_SYMBOL(pci_device_to_OF_node);
+EXPORT_SYMBOL(pci_device_from_OF_node);
+EXPORT_SYMBOL(of_find_node_by_name);
+EXPORT_SYMBOL(of_find_node_by_type);
+EXPORT_SYMBOL(of_find_compatible_node);
+EXPORT_SYMBOL(of_find_node_by_path);
+EXPORT_SYMBOL(of_find_all_nodes);
+EXPORT_SYMBOL(of_get_parent);
+EXPORT_SYMBOL(of_get_next_child);
+EXPORT_SYMBOL(of_node_get);
+EXPORT_SYMBOL(of_node_put);
+#endif /* CONFIG_PPC_OF */
+#if defined(CONFIG_BOOTX_TEXT)
+EXPORT_SYMBOL(btext_update_display);
+#endif
+#if defined(CONFIG_SCSI) && defined(CONFIG_PPC_PMAC)
+EXPORT_SYMBOL(note_scsi_host);
+#endif
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(kd_mksound);
+#endif
+EXPORT_SYMBOL(to_tm);
+
+EXPORT_SYMBOL(pm_power_off);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(cacheable_memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+
+#if defined(CONFIG_FB_VGA16_MODULE)
+EXPORT_SYMBOL(screen_info);
+#endif
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(timer_interrupt);
+EXPORT_SYMBOL(irq_desc);
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+EXPORT_SYMBOL(get_wchan);
+EXPORT_SYMBOL(console_drivers);
+
+#ifdef CONFIG_PPC_ISERIES
+EXPORT_SYMBOL(local_irq_disable);
+EXPORT_SYMBOL(local_irq_enable);
+EXPORT_SYMBOL(local_get_flags);
+#endif
+
+#ifdef CONFIG_XMON
+EXPORT_SYMBOL(xmon);
+EXPORT_SYMBOL(xmon_printf);
+#endif
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+extern void (*debugger)(struct pt_regs *regs);
+extern int (*debugger_bpt)(struct pt_regs *regs);
+extern int (*debugger_sstep)(struct pt_regs *regs);
+extern int (*debugger_iabr_match)(struct pt_regs *regs);
+extern int (*debugger_dabr_match)(struct pt_regs *regs);
+extern void (*debugger_fault_handler)(struct pt_regs *regs);
+
+EXPORT_SYMBOL(debugger);
+EXPORT_SYMBOL(debugger_bpt);
+EXPORT_SYMBOL(debugger_sstep);
+EXPORT_SYMBOL(debugger_iabr_match);
+EXPORT_SYMBOL(debugger_dabr_match);
+EXPORT_SYMBOL(debugger_fault_handler);
+#endif
+
+#ifdef  CONFIG_8xx
+EXPORT_SYMBOL(cpm_install_handler);
+EXPORT_SYMBOL(cpm_free_handler);
+#endif /* CONFIG_8xx */
+#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
+	defined(CONFIG_83xx)
+EXPORT_SYMBOL(__res);
+#endif
+
+EXPORT_SYMBOL(next_mmu_context);
+EXPORT_SYMBOL(set_context);
+EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
+EXPORT_SYMBOL(disarm_decr);
+#ifdef CONFIG_PPC_STD_MMU
+extern long mol_trampoline;
+EXPORT_SYMBOL(mol_trampoline); /* For MOL */
+EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
+#ifdef CONFIG_SMP
+extern int mmu_hash_lock;
+EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
+#endif /* CONFIG_SMP */
+extern long *intercept_table;
+EXPORT_SYMBOL(intercept_table);
+#endif /* CONFIG_PPC_STD_MMU */
+EXPORT_SYMBOL(cur_cpu_spec);
+#ifdef CONFIG_PPC_PMAC
+extern unsigned long agp_special_page;
+EXPORT_SYMBOL(agp_special_page);
+#endif
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+EXPORT_SYMBOL(__mtdcr);
+EXPORT_SYMBOL(__mfdcr);
+#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
new file mode 100644
index 0000000..e394676
--- /dev/null
+++ b/arch/powerpc/kernel/process.c
@@ -0,0 +1,726 @@
+/*
+ *  arch/ppc/kernel/process.c
+ *
+ *  Derived from "arch/i386/kernel/process.c"
+ *    Copyright (C) 1995  Linus Torvalds
+ *
+ *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
+ *  Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/elf.h>
+#include <linux/init.h>
+#include <linux/prctl.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/mqueue.h>
+#include <linux/hardirq.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/prom.h>
+
+extern unsigned long _get_SP(void);
+
+#ifndef CONFIG_SMP
+struct task_struct *last_task_used_math = NULL;
+struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_spe = NULL;
+#endif
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+
+/* this is 8kB-aligned so we can get to the thread_info struct
+   at the base of it from the stack pointer with 1 integer instruction. */
+union thread_union init_thread_union
+	__attribute__((__section__(".data.init_task"))) =
+{ INIT_THREAD_INFO(init_task) };
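+
+/*
+ * For illustration, the "1 integer instruction" trick mentioned above
+ * is masking the low bits of the stack pointer (a sketch, assuming
+ * THREAD_SIZE == 8192):
+ *
+ *	static inline struct thread_info *current_thread_info(void)
+ *	{
+ *		register unsigned long sp asm("r1");
+ *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ *	}
+ */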
+
+/* initial task structure */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/* only used to get secondary processor up */
+struct task_struct *current_set[NR_CPUS] = {&init_task, };
+
+/*
+ * Make sure the floating-point register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
+void enable_kernel_fp(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+	else
+		giveup_fpu(NULL);	/* just enables FP for kernel */
+#else
+	giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
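+
+/*
+ * A usage sketch for enable_kernel_fp(): the caller is responsible for
+ * keeping preemption off around the FP section, e.g.
+ *
+ *	preempt_disable();
+ *	enable_kernel_fp();
+ *	... use FP registers from kernel code ...
+ *	preempt_enable();
+ */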
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	if (!tsk->thread.regs)
+		return 0;
+	flush_fp_to_thread(current);
+
+	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+
+	return 1;
+}
+
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+		giveup_altivec(current);
+	else
+		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
+#else
+	giveup_altivec(last_task_used_altivec);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_altivec);
+
+/*
+ * Make sure the VMX/Altivec register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	flush_altivec_to_thread(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	return 1;
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_SPE
+
+void enable_kernel_spe(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+		giveup_spe(current);
+	else
+		giveup_spe(NULL);	/* just enable SPE for kernel - force */
+#else
+	giveup_spe(last_task_used_spe);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_spe);
+
+void flush_spe_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_SPE) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_spe(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+{
+	flush_spe_to_thread(current);
+	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+	return 1;
+}
+#endif /* CONFIG_SPE */
+
+static void set_dabr_spr(unsigned long val)
+{
+	mtspr(SPRN_DABR, val);
+}
+
+int set_dabr(unsigned long dabr)
+{
+	int ret = 0;
+
+#ifdef CONFIG_PPC64
+	if (firmware_has_feature(FW_FEATURE_XDABR)) {
+		/* We want to catch accesses from kernel and userspace */
+		unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
+		ret = plpar_set_xdabr(dabr, flags);
+	} else if (firmware_has_feature(FW_FEATURE_DABR)) {
+		ret = plpar_set_dabr(dabr);
+	} else
+#endif
+		set_dabr_spr(dabr);
+
+	return ret;
+}
+
+static DEFINE_PER_CPU(unsigned long, current_dabr);
+
+struct task_struct *__switch_to(struct task_struct *prev,
+	struct task_struct *new)
+{
+	struct thread_struct *new_thread, *old_thread;
+	unsigned long flags;
+	struct task_struct *last;
+
+#ifdef CONFIG_SMP
+	/* avoid complexity of lazy save/restore of fpu
+	 * by just saving it every time we switch out if
+	 * this task used the fpu during the last quantum.
+	 *
+	 * If it tries to use the fpu again, it'll trap and
+	 * reload its fp regs.  So we don't have to do a restore
+	 * every switch, just a save.
+	 *  -- Cort
+	 */
+	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
+		giveup_fpu(prev);
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * If the previous thread used altivec in the last quantum
+	 * (thus changing altivec regs) then save them.
+	 * We used to check the VRSAVE register but not all apps
+	 * set it, so we don't rely on it now (and in fact we need
+	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
+	 *
+	 * On SMP we always save/restore altivec regs just to avoid the
+	 * complexity of changing processors.
+	 *  -- Cort
+	 */
+	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
+		giveup_altivec(prev);
+	/* Avoid the trap.  On SMP this never happens since
+	 * we don't set last_task_used_altivec -- Cort
+	 */
+	if (new->thread.regs && last_task_used_altivec == new)
+		new->thread.regs->msr |= MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	/*
+	 * If the previous thread used spe in the last quantum
+	 * (thus changing spe regs) then save them.
+	 *
+	 * On SMP we always save/restore spe regs just to avoid the
+	 * complexity of changing processors.
+	 */
+	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
+		giveup_spe(prev);
+	/* Avoid the trap.  On SMP this never happens since
+	 * we don't set last_task_used_spe
+	 */
+	if (new->thread.regs && last_task_used_spe == new)
+		new->thread.regs->msr |= MSR_SPE;
+#endif /* CONFIG_SPE */
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64	/* for now */
+	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
+		set_dabr(new->thread.dabr);
+		__get_cpu_var(current_dabr) = new->thread.dabr;
+	}
+#endif
+
+	new_thread = &new->thread;
+	old_thread = &current->thread;
+	local_irq_save(flags);
+	last = _switch(old_thread, new_thread);
+
+	local_irq_restore(flags);
+
+	return last;
+}
+
+void show_regs(struct pt_regs * regs)
+{
+	int i, trap;
+
+	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
+	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
+	       print_tainted());
+	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
+	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
+	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
+	       regs->msr&MSR_IR ? 1 : 0,
+	       regs->msr&MSR_DR ? 1 : 0);
+	trap = TRAP(regs);
+	if (trap == 0x300 || trap == 0x600)
+		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
+	printk("TASK = %p[%d] '%s' THREAD: %p\n",
+	       current, current->pid, current->comm, current->thread_info);
+	printk("Last syscall: %ld ", current->thread.last_syscall);
+
+#ifdef CONFIG_SMP
+	printk(" CPU: %d", smp_processor_id());
+#endif /* CONFIG_SMP */
+
+	for (i = 0;  i < 32;  i++) {
+		long r;
+		if ((i % 8) == 0)
+			printk("\n" KERN_INFO "GPR%02d: ", i);
+		if (__get_user(r, &regs->gpr[i]))
+			break;
+		printk("%08lX ", r);
+		if (i == 12 && !FULL_REGS(regs))
+			break;
+	}
+	printk("\n");
+#ifdef CONFIG_KALLSYMS
+	/*
+	 * Look up the NIP late so we have the best chance of getting the
+	 * above info out without failing
+	 */
+	printk("NIP [%08lx] ", regs->nip);
+	print_symbol("%s\n", regs->nip);
+	printk("LR [%08lx] ", regs->link);
+	print_symbol("%s\n", regs->link);
+#endif
+	show_stack(current, (unsigned long *) regs->gpr[1]);
+}
+
+void exit_thread(void)
+{
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+}
+
+void flush_thread(void)
+{
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64	/* for now */
+	if (current->thread.dabr) {
+		current->thread.dabr = 0;
+		set_dabr(0);
+	}
+#endif
+}
+
+void
+release_thread(struct task_struct *t)
+{
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
+	flush_spe_to_thread(current);
+}
+
+/*
+ * Copy a thread.
+ */
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+	    unsigned long unused,
+	    struct task_struct *p, struct pt_regs *regs)
+{
+	struct pt_regs *childregs, *kregs;
+	extern void ret_from_fork(void);
+	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
+	unsigned long childframe;
+
+	CHECK_FULL_REGS(regs);
+	/* Copy registers */
+	sp -= sizeof(struct pt_regs);
+	childregs = (struct pt_regs *) sp;
+	*childregs = *regs;
+	if ((childregs->msr & MSR_PR) == 0) {
+		/* for kernel thread, set `current' and stackptr in new task */
+		childregs->gpr[1] = sp + sizeof(struct pt_regs);
+		childregs->gpr[2] = (unsigned long) p;
+		p->thread.regs = NULL;	/* no user register state */
+	} else {
+		childregs->gpr[1] = usp;
+		p->thread.regs = childregs;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->gpr[2] = childregs->gpr[6];
+	}
+	childregs->gpr[3] = 0;  /* Result from fork() */
+	sp -= STACK_FRAME_OVERHEAD;
+	childframe = sp;
+
+	/*
+	 * The way this works is that at some point in the future
+	 * some task will call _switch to switch to the new task.
+	 * That will pop off the stack frame created below and start
+	 * the new task running at ret_from_fork.  The new task will
+	 * do some housekeeping and then return from the fork or clone
+	 * system call, using the stack frame created above.
+	 */
+	sp -= sizeof(struct pt_regs);
+	kregs = (struct pt_regs *) sp;
+	sp -= STACK_FRAME_OVERHEAD;
+	p->thread.ksp = sp;
+	kregs->nip = (unsigned long)ret_from_fork;
+
+	p->thread.last_syscall = -1;
+
+	return 0;
+}
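+
+/*
+ * Sketch of the child kernel stack that copy_thread() builds
+ * (growing downward from the top of the thread_info page):
+ *
+ *	stack top:	struct pt_regs		childregs (copy of regs)
+ *			STACK_FRAME_OVERHEAD	childframe
+ *			struct pt_regs		kregs, nip = ret_from_fork
+ *			STACK_FRAME_OVERHEAD
+ *	p->thread.ksp:	the frame _switch() pops to start the child
+ */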
+
+/*
+ * Set up a thread for executing a new program
+ */
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
+{
+	set_fs(USER_DS);
+	memset(regs->gpr, 0, sizeof(regs->gpr));
+	regs->ctr = 0;
+	regs->link = 0;
+	regs->xer = 0;
+	regs->ccr = 0;
+	regs->mq = 0;
+	regs->nip = nip;
+	regs->gpr[1] = sp;
+	regs->msr = MSR_USER;
+#ifndef CONFIG_SMP
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+#endif /* CONFIG_SMP */
+	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
+	current->thread.fpscr = 0;
+#ifdef CONFIG_ALTIVEC
+	memset(current->thread.vr, 0, sizeof(current->thread.vr));
+	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
+	current->thread.vrsave = 0;
+	current->thread.used_vr = 0;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	memset(current->thread.evr, 0, sizeof(current->thread.evr));
+	current->thread.acc = 0;
+	current->thread.spefscr = 0;
+	current->thread.used_spe = 0;
+#endif /* CONFIG_SPE */
+}
+
+#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
+		| PR_FP_EXC_RES | PR_FP_EXC_INV)
+
+int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
+{
+	struct pt_regs *regs = tsk->thread.regs;
+
+	/* This is a bit hairy.  If we are an SPE-enabled processor
+	 * (have embedded fp) we store the IEEE exception enable flags in
+	 * fpexc_mode.  fpexc_mode is also used for setting the FP exception
+	 * mode (async, precise, disabled) for 'Classic' FP. */
+	if (val & PR_FP_EXC_SW_ENABLE) {
+#ifdef CONFIG_SPE
+		tsk->thread.fpexc_mode = val &
+			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#else
+		return -EINVAL;
+#endif
+	} else {
+		/* On a CONFIG_SPE processor this does not hurt us.  The bits
+		 * that __pack_fe01 uses do not overlap with the bits used for
+		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
+		 * on CONFIG_SPE implementations are reserved, so writing to
+		 * them does not change anything */
+		if (val > PR_FP_EXC_PRECISE)
+			return -EINVAL;
+		tsk->thread.fpexc_mode = __pack_fe01(val);
+		if (regs != NULL && (regs->msr & MSR_FP) != 0)
+			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
+				| tsk->thread.fpexc_mode;
+	}
+	return 0;
+}
+
+int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
+{
+	unsigned int val;
+
+	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+#ifdef CONFIG_SPE
+		val = tsk->thread.fpexc_mode;
+#else
+		return -EINVAL;
+#endif
+	else
+		val = __unpack_fe01(tsk->thread.fpexc_mode);
+	return put_user(val, (unsigned int __user *) adr);
+}
+
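User space drives set_fpexc_mode() and get_fpexc_mode() through prctl(); a minimal user-space sketch (note that values beyond PR_FP_EXC_PRECISE are rejected on classic FP, as the code above shows):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int mode;

	/* Request precise FP exception mode; on powerpc this lands in
	 * set_fpexc_mode() above. */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE))
		perror("PR_SET_FPEXC");

	/* Read the mode back through get_fpexc_mode(). */
	if (prctl(PR_GET_FPEXC, &mode))
		perror("PR_GET_FPEXC");
	else
		printf("fpexc mode: %#x\n", mode);
	return 0;
}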
+int sys_clone(unsigned long clone_flags, unsigned long usp,
+	      int __user *parent_tidp, void __user *child_threadptr,
+	      int __user *child_tidp, int p6,
+	      struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	if (usp == 0)
+		usp = regs->gpr[1];	/* stack pointer for child */
+	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
+}
+
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+	     unsigned long p4, unsigned long p5, unsigned long p6,
+	     struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
+}
+
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+	      unsigned long p4, unsigned long p5, unsigned long p6,
+	      struct pt_regs *regs)
+{
+	CHECK_FULL_REGS(regs);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
+			regs, 0, NULL, NULL);
+}
+
+int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+	       unsigned long a3, unsigned long a4, unsigned long a5,
+	       struct pt_regs *regs)
+{
+	int error;
+	char * filename;
+
+	filename = getname((char __user *) a0);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
+	flush_spe_to_thread(current);
+	error = do_execve(filename, (char __user * __user *) a1,
+			  (char __user * __user *) a2, regs);
+	if (error == 0) {
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+	}
+	putname(filename);
+out:
+	return error;
+}
+
+static int validate_sp(unsigned long sp, struct task_struct *p,
+		       unsigned long nbytes)
+{
+	unsigned long stack_page = (unsigned long)p->thread_info;
+
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+#ifdef CONFIG_IRQSTACKS
+	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
+	if (sp >= stack_page + sizeof(struct thread_struct)
+	    && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+#endif
+
+	return 0;
+}
+
+void dump_stack(void)
+{
+	show_stack(current, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_stack(struct task_struct *tsk, unsigned long *stack)
+{
+	unsigned long sp, stack_top, prev_sp, ret;
+	int count = 0;
+	unsigned long next_exc = 0;
+	struct pt_regs *regs;
+	extern char ret_from_except, ret_from_except_full, ret_from_syscall;
+
+	sp = (unsigned long) stack;
+	if (tsk == NULL)
+		tsk = current;
+	if (sp == 0) {
+		if (tsk == current)
+			asm("mr %0,1" : "=r" (sp));
+		else
+			sp = tsk->thread.ksp;
+	}
+
+	prev_sp = (unsigned long) (tsk->thread_info + 1);
+	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
+	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
+		if (count == 0) {
+			printk("Call trace:");
+#ifdef CONFIG_KALLSYMS
+			printk("\n");
+#endif
+		} else {
+			if (next_exc) {
+				ret = next_exc;
+				next_exc = 0;
+			} else
+				ret = *(unsigned long *)(sp + 4);
+			printk(" [%08lx] ", ret);
+#ifdef CONFIG_KALLSYMS
+			print_symbol("%s", ret);
+			printk("\n");
+#endif
+			if (ret == (unsigned long) &ret_from_except
+			    || ret == (unsigned long) &ret_from_except_full
+			    || ret == (unsigned long) &ret_from_syscall) {
+				/* sp + 16 points to an exception frame */
+				regs = (struct pt_regs *) (sp + 16);
+				if (sp + 16 + sizeof(*regs) <= stack_top)
+					next_exc = regs->nip;
+			}
+		}
+		++count;
+		sp = *(unsigned long *)sp;
+	}
+#ifndef CONFIG_KALLSYMS
+	if (count > 0)
+		printk("\n");
+#endif
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long ip, sp;
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	sp = p->thread.ksp;
+	if (!validate_sp(sp, p, 16))
+		return 0;
+
+	do {
+		sp = *(unsigned long *)sp;
+		if (!validate_sp(sp, p, 16))
+			return 0;
+		if (count > 0) {
+			ip = *(unsigned long *)(sp + 4);
+			if (!in_sched_functions(ip))
+				return ip;
+		}
+	} while (count++ < 16);
+	return 0;
+}
+EXPORT_SYMBOL(get_wchan);
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644
index 0000000..2f8c3c9
--- /dev/null
+++ b/arch/powerpc/kernel/semaphore.c
@@ -0,0 +1,135 @@
+/*
+ * PowerPC-specific semaphore code.
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
+ * to eliminate the SMP races in the old version between the updates
+ * of `count' and `waking'.  Now we use negative `count' values to
+ * indicate that some process(es) are waiting for the semaphore.
+ */
+
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ *	old_count = sem->count;
+ *	tmp = MAX(old_count, 0) + incr;
+ *	sem->count = tmp;
+ *	return old_count;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_count, tmp;
+
+	__asm__ __volatile__("\n"
+"1:	lwarx	%0,0,%3\n"
+"	srawi	%1,%0,31\n"
+"	andc	%1,%0,%1\n"
+"	add	%1,%1,%4\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%1,0,%3\n"
+"	bne	1b"
+	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+	: "r" (&sem->count), "r" (incr), "m" (sem->count)
+	: "cc");
+
+	return old_count;
+}
+
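For illustration, the same update rule in portable C: a user-space analogue of the lwarx/stwcx. reservation loop above, built on GCC's __sync compare-and-swap builtin (a sketch, not how the kernel routine is built):

#include <stdio.h>

static int sem_update_count_sketch(int *count, int incr)
{
	int old_count, tmp;

	do {
		old_count = *count;
		/* clamp a negative count to zero before adding */
		tmp = (old_count > 0 ? old_count : 0) + incr;
	} while (!__sync_bool_compare_and_swap(count, old_count, tmp));
	return old_count;
}

int main(void)
{
	int count = -1;					/* negative: someone is waiting */
	int old = sem_update_count_sketch(&count, 1);	/* what __up() does */

	printf("old=%d new=%d\n", old, count);		/* old=-1 new=1 */
	return 0;
}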
+void __up(struct semaphore *sem)
+{
+	/*
+	 * Note that we incremented count in up() before we came here,
+	 * but that was ineffective since the result was <= 0, and
+	 * any negative value of count is equivalent to 0.
+	 * This ends up setting count to 1, unless count is now > 0
+	 * (i.e. because some other cpu has called up() in the meantime),
+	 * in which case we just increment count.
+	 */
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+EXPORT_SYMBOL(__up);
+
+/*
+ * Note that when we come in to __down or __down_interruptible,
+ * we have already decremented count, but that decrement was
+ * ineffective since the result was < 0, and any negative value
+ * of count is equivalent to 0.
+ * Thus it is only when we decrement count from some value > 0
+ * that we have actually got the semaphore.
+ */
+void __sched __down(struct semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	/*
+	 * Try to get the semaphore.  If the count is > 0, then we've
+	 * got the semaphore; we decrement count and exit the loop.
+	 * If the count is 0 or negative, we set it to -1, indicating
+	 * that we are asleep, and then sleep.
+	 */
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+
+	/*
+	 * If there are any more sleepers, wake one of them up so
+	 * that it can either get the semaphore, or set count to -1
+	 * indicating that there are still processes sleeping.
+	 */
+	wake_up(&sem->wait);
+}
+EXPORT_SYMBOL(__down);
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_INTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/*
+			 * A signal is pending - give up trying.
+			 * Set sem->count to 0 if it is negative,
+			 * since we are no longer sleeping.
+			 */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		set_task_state(tsk, TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+
+	wake_up(&sem->wait);
+	return retval;
+}
+EXPORT_SYMBOL(__down_interruptible);
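For context, a sketch of how a hypothetical caller elsewhere in the kernel uses these primitives; the slow paths above run only when the semaphore is contended:

#include <asm/semaphore.h>

static DECLARE_MUTEX(demo_sem);		/* semaphore with count 1 */

static int demo_critical_section(void)
{
	/* A contended down_interruptible() sleeps in __down_interruptible()
	 * above and returns nonzero if a signal arrives first. */
	if (down_interruptible(&demo_sem))
		return -EINTR;
	/* ... exclusive access ... */
	up(&demo_sem);		/* wakes a sleeper via __up() if count was <= 0 */
	return 0;
}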
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 0000000..c7afbbb
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1047 @@
+/*
+ *  arch/powerpc/kernel/traps.c
+ *
+ *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Modified by Cort Dougan (cort@cs.nmt.edu)
+ *  and Paul Mackerras (paulus@samba.org)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of hardware exceptions
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/prctl.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <asm/kdebug.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/reg.h>
+#include <asm/xmon.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+#include <asm/perfmon.h>
+
+#ifdef CONFIG_DEBUGGER
+int (*__debugger)(struct pt_regs *regs);
+int (*__debugger_ipi)(struct pt_regs *regs);
+int (*__debugger_bpt)(struct pt_regs *regs);
+int (*__debugger_sstep)(struct pt_regs *regs);
+int (*__debugger_iabr_match)(struct pt_regs *regs);
+int (*__debugger_dabr_match)(struct pt_regs *regs);
+int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+EXPORT_SYMBOL(__debugger);
+EXPORT_SYMBOL(__debugger_ipi);
+EXPORT_SYMBOL(__debugger_bpt);
+EXPORT_SYMBOL(__debugger_sstep);
+EXPORT_SYMBOL(__debugger_iabr_match);
+EXPORT_SYMBOL(__debugger_dabr_match);
+EXPORT_SYMBOL(__debugger_fault_handler);
+#endif
+
+struct notifier_block *powerpc_die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+	int err = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&die_notifier_lock, flags);
+	err = notifier_chain_register(&powerpc_die_chain, nb);
+	spin_unlock_irqrestore(&die_notifier_lock, flags);
+	return err;
+}
+
+/*
+ * Trap & Exception support
+ */
+
+static DEFINE_SPINLOCK(die_lock);
+
+int die(const char *str, struct pt_regs *regs, long err)
+{
+	static int die_counter;
+	int nl = 0;
+
+	if (debugger(regs))
+		return 1;
+
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_PMAC_BACKLIGHT
+	if (_machine == _MACH_Pmac) {
+		set_backlight_enable(1);
+		set_backlight_level(BACKLIGHT_MAX);
+	}
+#endif
+	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+	printk("PREEMPT ");
+	nl = 1;
+#endif
+#ifdef CONFIG_SMP
+	printk("SMP NR_CPUS=%d ", NR_CPUS);
+	nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	printk("DEBUG_PAGEALLOC ");
+	nl = 1;
+#endif
+#ifdef CONFIG_NUMA
+	printk("NUMA ");
+	nl = 1;
+#endif
+#ifdef CONFIG_PPC64
+	switch (systemcfg->platform) {
+	case PLATFORM_PSERIES:
+		printk("PSERIES ");
+		nl = 1;
+		break;
+	case PLATFORM_PSERIES_LPAR:
+		printk("PSERIES LPAR ");
+		nl = 1;
+		break;
+	case PLATFORM_ISERIES_LPAR:
+		printk("ISERIES LPAR ");
+		nl = 1;
+		break;
+	case PLATFORM_POWERMAC:
+		printk("POWERMAC ");
+		nl = 1;
+		break;
+	case PLATFORM_BPA:
+		printk("BPA ");
+		nl = 1;
+		break;
+	}
+#endif
+	if (nl)
+		printk("\n");
+	print_modules();
+	show_regs(regs);
+	bust_spinlocks(0);
+	spin_unlock_irq(&die_lock);
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops) {
+		panic("Fatal exception");
+	}
+	do_exit(err);
+
+	return 0;
+}
+
+void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
+{
+	siginfo_t info;
+
+	if (!user_mode(regs)) {
+		if (die("Exception in kernel mode", regs, signr))
+			return;
+	}
+
+	memset(&info, 0, sizeof(info));
+	info.si_signo = signr;
+	info.si_code = code;
+	info.si_addr = (void __user *) addr;
+	force_sig_info(signr, &info, current);
+
+	/*
+	 * Init gets no signals that it doesn't have a handler for.
+	 * That's all very well, but if it has caused a synchronous
+	 * exception and we ignore the resulting signal, it will just
+	 * generate the same exception over and over again and we get
+	 * nowhere.  Better to kill it and let the kernel panic.
+	 */
+	if (current->pid == 1) {
+		__sighandler_t handler;
+
+		spin_lock_irq(&current->sighand->siglock);
+		handler = current->sighand->action[signr-1].sa.sa_handler;
+		spin_unlock_irq(&current->sighand->siglock);
+		if (handler == SIG_DFL) {
+			/* init has generated a synchronous exception
+			   and it doesn't have a handler for the signal */
+			printk(KERN_CRIT "init has generated signal %d "
+			       "but has no handler for it\n", signr);
+			do_exit(signr);
+		}
+	}
+}
+
+#ifdef CONFIG_PPC64
+void system_reset_exception(struct pt_regs *regs)
+{
+	/* See if any machine dependent calls */
+	if (ppc_md.system_reset_exception)
+		ppc_md.system_reset_exception(regs);
+
+	die("System Reset", regs, SIGABRT);
+
+	/* Must die if the interrupt is not recoverable */
+	if (!(regs->msr & MSR_RI))
+		panic("Unrecoverable System Reset");
+
+	/* What should we do here? We could issue a shutdown or hard reset. */
+}
+#endif
+
+/*
+ * I/O accesses can cause machine checks on powermacs.
+ * Check if the NIP corresponds to the address of a sync
+ * instruction for which there is an entry in the exception
+ * table.
+ * Note that the 601 only takes a machine check on TEA
+ * (transfer error ack) signal assertion, and does not
+ * set any of the top 16 bits of SRR1.
+ *  -- paulus.
+ */
+static inline int check_io_access(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_PMAC
+	unsigned long msr = regs->msr;
+	const struct exception_table_entry *entry;
+	unsigned int *nip = (unsigned int *)regs->nip;
+
+	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
+	    && (entry = search_exception_tables(regs->nip)) != NULL) {
+		/*
+		 * Check that it's a sync instruction, or somewhere
+		 * in the twi; isync; nop sequence that inb/inw/inl uses.
+		 * As the address is in the exception table
+		 * we should be able to read the instr there.
+		 * For the debug message, we look at the preceding
+		 * load or store.
+		 */
+		if (*nip == 0x60000000)		/* nop */
+			nip -= 2;
+		else if (*nip == 0x4c00012c)	/* isync */
+			--nip;
+		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
+			/* sync or twi */
+			unsigned int rb;
+
+			--nip;
+			rb = (*nip >> 11) & 0x1f;
+			printk(KERN_DEBUG "%s bad port %lx at %p\n",
+			       (*nip & 0x100)? "OUT to": "IN from",
+			       regs->gpr[rb] - _IO_BASE, nip);
+			regs->msr |= MSR_RI;
+			regs->nip = entry->fixup;
+			return 1;
+		}
+	}
+#endif /* CONFIG_PPC_PMAC */
+	return 0;
+}
+
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+/* On 4xx and Book-E, the reason for the machine check or program
+   exception is in the ESR. */
+#define get_reason(regs)	((regs)->dsisr)
+#ifndef CONFIG_FSL_BOOKE
+#define get_mc_reason(regs)	((regs)->dsisr)
+#else
+#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
+#endif
+#define REASON_FP		ESR_FP
+#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
+#define REASON_PRIVILEGED	ESR_PPR
+#define REASON_TRAP		ESR_PTR
+
+/* single-step stuff */
+#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
+
+#else
+/* On other processors, the reason for the machine check or program
+   exception is in the MSR. */
+#define get_reason(regs)	((regs)->msr)
+#define get_mc_reason(regs)	((regs)->msr)
+#define REASON_FP		0x100000
+#define REASON_ILLEGAL		0x80000
+#define REASON_PRIVILEGED	0x40000
+#define REASON_TRAP		0x20000
+
+#define single_stepping(regs)	((regs)->msr & MSR_SE)
+#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
+#endif
+
+/*
+ * This is "fall-back" implementation for configurations
+ * which don't provide platform-specific machine check info
+ */
+void __attribute__ ((weak))
+platform_machine_check(struct pt_regs *regs)
+{
+}
+
+void MachineCheckException(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC64
+	int recover = 0;
+
+	/* See if any machine dependent calls */
+	if (ppc_md.machine_check_exception)
+		recover = ppc_md.machine_check_exception(regs);
+
+	if (recover)
+		return;
+#else
+	unsigned long reason = get_mc_reason(regs);
+
+	if (user_mode(regs)) {
+		regs->msr |= MSR_RI;
+		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
+		return;
+	}
+
+#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
+	/* the qspan pci read routines can cause machine checks -- Cort */
+	bad_page_fault(regs, regs->dar, SIGBUS);
+	return;
+#endif
+
+	if (debugger_fault_handler(regs)) {
+		regs->msr |= MSR_RI;
+		return;
+	}
+
+	if (check_io_access(regs))
+		return;
+
+#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
+	if (reason & ESR_IMCP) {
+		printk("Instruction");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+	} else
+		printk("Data");
+	printk(" machine check in kernel mode.\n");
+#elif defined(CONFIG_440A)
+	printk("Machine check in kernel mode.\n");
+	if (reason & ESR_IMCP){
+		printk("Instruction Synchronous Machine Check exception\n");
+		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+	}
+	else {
+		u32 mcsr = mfspr(SPRN_MCSR);
+		if (mcsr & MCSR_IB)
+			printk("Instruction Read PLB Error\n");
+		if (mcsr & MCSR_DRB)
+			printk("Data Read PLB Error\n");
+		if (mcsr & MCSR_DWB)
+			printk("Data Write PLB Error\n");
+		if (mcsr & MCSR_TLBP)
+			printk("TLB Parity Error\n");
+		if (mcsr & MCSR_ICP){
+			flush_instruction_cache();
+			printk("I-Cache Parity Error\n");
+		}
+		if (mcsr & MCSR_DCSP)
+			printk("D-Cache Search Parity Error\n");
+		if (mcsr & MCSR_DCFP)
+			printk("D-Cache Flush Parity Error\n");
+		if (mcsr & MCSR_IMPE)
+			printk("Machine Check exception is imprecise\n");
+
+		/* Clear MCSR */
+		mtspr(SPRN_MCSR, mcsr);
+	}
+#elif defined (CONFIG_E500)
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from MCSR=%lx): ", reason);
+
+	if (reason & MCSR_MCP)
+		printk("Machine Check Signal\n");
+	if (reason & MCSR_ICPERR)
+		printk("Instruction Cache Parity Error\n");
+	if (reason & MCSR_DCP_PERR)
+		printk("Data Cache Push Parity Error\n");
+	if (reason & MCSR_DCPERR)
+		printk("Data Cache Parity Error\n");
+	if (reason & MCSR_GL_CI)
+		printk("Guarded Load or Cache-Inhibited stwcx.\n");
+	if (reason & MCSR_BUS_IAERR)
+		printk("Bus - Instruction Address Error\n");
+	if (reason & MCSR_BUS_RAERR)
+		printk("Bus - Read Address Error\n");
+	if (reason & MCSR_BUS_WAERR)
+		printk("Bus - Write Address Error\n");
+	if (reason & MCSR_BUS_IBERR)
+		printk("Bus - Instruction Data Error\n");
+	if (reason & MCSR_BUS_RBERR)
+		printk("Bus - Read Data Bus Error\n");
+	if (reason & MCSR_BUS_WBERR)
+		printk("Bus - Write Data Bus Error\n");
+	if (reason & MCSR_BUS_IPERR)
+		printk("Bus - Instruction Parity Error\n");
+	if (reason & MCSR_BUS_RPERR)
+		printk("Bus - Read Parity Error\n");
+#elif defined (CONFIG_E200)
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from MCSR=%lx): ", reason);
+
+	if (reason & MCSR_MCP)
+		printk("Machine Check Signal\n");
+	if (reason & MCSR_CP_PERR)
+		printk("Cache Push Parity Error\n");
+	if (reason & MCSR_CPERR)
+		printk("Cache Parity Error\n");
+	if (reason & MCSR_EXCP_ERR)
+		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
+	if (reason & MCSR_BUS_IRERR)
+		printk("Bus - Read Bus Error on instruction fetch\n");
+	if (reason & MCSR_BUS_DRERR)
+		printk("Bus - Read Bus Error on data load\n");
+	if (reason & MCSR_BUS_WRERR)
+		printk("Bus - Write Bus Error on buffered store or cache line push\n");
+#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
+	printk("Machine check in kernel mode.\n");
+	printk("Caused by (from SRR1=%lx): ", reason);
+	switch (reason & 0x601F0000) {
+	case 0x80000:
+		printk("Machine check signal\n");
+		break;
+	case 0:		/* for 601 */
+	case 0x40000:
+	case 0x140000:	/* 7450 MSS error and TEA */
+		printk("Transfer error ack signal\n");
+		break;
+	case 0x20000:
+		printk("Data parity error signal\n");
+		break;
+	case 0x10000:
+		printk("Address parity error signal\n");
+		break;
+	case 0x20000000:
+		printk("L1 Data Cache error\n");
+		break;
+	case 0x40000000:
+		printk("L1 Instruction Cache error\n");
+		break;
+	case 0x00100000:
+		printk("L2 data cache parity error\n");
+		break;
+	default:
+		printk("Unknown values in msr\n");
+	}
+#endif /* CONFIG_4xx */
+
+	/*
+	 * Optional platform-provided routine to print out
+	 * additional info, e.g. bus error registers.
+	 */
+	platform_machine_check(regs);
+#endif /* CONFIG_PPC64 */
+
+	if (debugger_fault_handler(regs))
+		return;
+	die("Machine check", regs, SIGBUS);
+
+	/* Must die if the interrupt is not recoverable */
+	if (!(regs->msr & MSR_RI))
+		panic("Unrecoverable Machine check");
+}
+
+void SMIException(struct pt_regs *regs)
+{
+	die("System Management Interrupt", regs, SIGABRT);
+}
+
+void UnknownException(struct pt_regs *regs)
+{
+	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+	       regs->nip, regs->msr, regs->trap);
+
+	_exception(SIGTRAP, regs, 0, 0);
+}
+
+void InstructionBreakpoint(struct pt_regs *regs)
+{
+	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
+					5, SIGTRAP) == NOTIFY_STOP)
+		return;
+	if (debugger_iabr_match(regs))
+		return;
+	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+}
+
+void RunModeException(struct pt_regs *regs)
+{
+	_exception(SIGTRAP, regs, 0, 0);
+}
+
+void SingleStepException(struct pt_regs *regs)
+{
+	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
+
+	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
+					5, SIGTRAP) == NOTIFY_STOP)
+		return;
+	if (debugger_sstep(regs))
+		return;
+
+	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+}
+
+/*
+ * After we have successfully emulated an instruction, we have to
+ * check if the instruction was being single-stepped, and if so,
+ * pretend we got a single-step exception.  This was pointed out
+ * by Kumar Gala.  -- paulus
+ */
+static void emulate_single_step(struct pt_regs *regs)
+{
+	if (single_stepping(regs)) {
+		clear_single_step(regs);
+		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
+	}
+}
+
+/* Illegal instruction emulation support.  Originally written to
+ * provide the PVR to user applications using "mfspr rd, PVR".
+ * Return non-zero if we can't emulate, or -EFAULT if the associated
+ * memory access caused an access fault.  Return zero on success.
+ *
+ * There are a couple of ways to do this, either "decode" the instruction
+ * or directly match lots of bits.  In this case, matching lots of
+ * bits is faster and easier.
+ */
+#define INST_MFSPR_PVR		0x7c1f42a6
+#define INST_MFSPR_PVR_MASK	0xfc1fffff
+
+#define INST_DCBA		0x7c0005ec
+#define INST_DCBA_MASK		0x7c0007fe
+
+#define INST_MCRXR		0x7c000400
+#define INST_MCRXR_MASK		0x7c0007fe
+
+#define INST_STRING		0x7c00042a
+#define INST_STRING_MASK	0x7c0007fe
+#define INST_STRING_GEN_MASK	0x7c00067e
+#define INST_LSWI		0x7c0004aa
+#define INST_LSWX		0x7c00042a
+#define INST_STSWI		0x7c0005aa
+#define INST_STSWX		0x7c00052a
+
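To see why one mask/match pair suffices, here is a standalone sketch that builds the encoding of "mfspr rD, PVR" for every rD and checks that the mask above collapses all 32 forms to the single matched value:

#include <assert.h>
#include <stdint.h>

#define SPRN_PVR	287

/* Encoding of "mfspr rD, PVR": opcode 31, extended opcode 339, with the
 * two 5-bit halves of the SPR number swapped into the instruction. */
static uint32_t mfspr_pvr(unsigned int rd)
{
	return 0x7c0002a6u | (rd << 21)
		| ((SPRN_PVR & 0x1fu) << 16)
		| ((SPRN_PVR >> 5) << 11);
}

int main(void)
{
	unsigned int rd;

	/* Masking off the rD field collapses every form to INST_MFSPR_PVR. */
	for (rd = 0; rd < 32; rd++)
		assert((mfspr_pvr(rd) & 0xfc1fffffu) == 0x7c1f42a6u);
	return 0;
}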
+static int emulate_string_inst(struct pt_regs *regs, u32 instword)
+{
+	u8 rT = (instword >> 21) & 0x1f;
+	u8 rA = (instword >> 16) & 0x1f;
+	u8 NB_RB = (instword >> 11) & 0x1f;
+	u32 num_bytes;
+	unsigned long EA;
+	int pos = 0;
+
+	/* Early out if we are an invalid form of lswx */
+	if ((instword & INST_STRING_MASK) == INST_LSWX)
+		if ((rT == rA) || (rT == NB_RB))
+			return -EINVAL;
+
+	EA = (rA == 0) ? 0 : regs->gpr[rA];
+
+	switch (instword & INST_STRING_MASK) {
+		case INST_LSWX:
+		case INST_STSWX:
+			EA += NB_RB;
+			num_bytes = regs->xer & 0x7f;
+			break;
+		case INST_LSWI:
+		case INST_STSWI:
+			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	while (num_bytes != 0)
+	{
+		u8 val;
+		u32 shift = 8 * (3 - (pos & 0x3));
+
+		switch ((instword & INST_STRING_MASK)) {
+			case INST_LSWX:
+			case INST_LSWI:
+				if (get_user(val, (u8 __user *)EA))
+					return -EFAULT;
+				/* first time updating this reg,
+				 * zero it out */
+				if (pos == 0)
+					regs->gpr[rT] = 0;
+				regs->gpr[rT] |= val << shift;
+				break;
+			case INST_STSWI:
+			case INST_STSWX:
+				val = regs->gpr[rT] >> shift;
+				if (put_user(val, (u8 __user *)EA))
+					return -EFAULT;
+				break;
+		}
+		/* move EA to next address */
+		EA += 1;
+		num_bytes--;
+
+		/* manage our position within the register */
+		if (++pos == 4) {
+			pos = 0;
+			if (++rT == 32)
+				rT = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int emulate_instruction(struct pt_regs *regs)
+{
+	u32 instword;
+	u32 rd;
+
+	if (!user_mode(regs))
+		return -EINVAL;
+	CHECK_FULL_REGS(regs);
+
+	if (get_user(instword, (u32 __user *)(regs->nip)))
+		return -EFAULT;
+
+	/* Emulate the mfspr rD, PVR. */
+	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
+		rd = (instword >> 21) & 0x1f;
+		regs->gpr[rd] = mfspr(SPRN_PVR);
+		return 0;
+	}
+
+	/* Emulating the dcba insn is just a no-op.  */
+	if ((instword & INST_DCBA_MASK) == INST_DCBA)
+		return 0;
+
+	/* Emulate the mcrxr insn.  */
+	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
+		int shift = (instword >> 21) & 0x1c;
+		unsigned long msk = 0xf0000000UL >> shift;
+
+		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
+		regs->xer &= ~0xf0000000UL;
+		return 0;
+	}
+
+	/* Emulate load/store string insn. */
+	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
+		return emulate_string_inst(regs, instword);
+
+	return -EINVAL;
+}
+
+/*
+ * Look through the list of trap instructions that are used for BUG(),
+ * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
+ * that the exception was caused by a trap instruction of some kind.
+ * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
+ * otherwise.
+ */
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+#ifndef CONFIG_MODULES
+#define module_find_bug(x)	NULL
+#endif
+
+struct bug_entry *find_bug(unsigned long bugaddr)
+{
+	struct bug_entry *bug;
+
+	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+		if (bugaddr == bug->bug_addr)
+			return bug;
+	return module_find_bug(bugaddr);
+}
+
+int check_bug_trap(struct pt_regs *regs)
+{
+	struct bug_entry *bug;
+	unsigned long addr;
+
+	if (regs->msr & MSR_PR)
+		return 0;	/* not in kernel */
+	addr = regs->nip;	/* address of trap instruction */
+	if (addr < PAGE_OFFSET)
+		return 0;
+	bug = find_bug(regs->nip);
+	if (bug == NULL)
+		return 0;
+	if (bug->line & BUG_WARNING_TRAP) {
+		/* this is a WARN_ON rather than BUG/BUG_ON */
+#ifdef CONFIG_XMON
+		xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
+		       bug->function, bug->file,
+		       bug->line & ~BUG_WARNING_TRAP);
+#endif /* CONFIG_XMON */
+		printk(KERN_ERR "Badness in %s at %s:%d\n",
+		       bug->function, bug->file,
+		       bug->line & ~BUG_WARNING_TRAP);
+		dump_stack();
+		return 1;
+	}
+#ifdef CONFIG_XMON
+	xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+	       bug->function, bug->file, bug->line);
+	xmon(regs);
+#endif /* CONFIG_XMON */
+	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+	       bug->function, bug->file, bug->line);
+
+	return 0;
+}
+
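For context, the trap instructions this code walks come from BUG(), BUG_ON() and WARN_ON() call sites; a sketch of a hypothetical caller:

#include <linux/kernel.h>

static void demo_check(void *ptr, int len)
{
	/* Compiles to a conditional trap whose __bug_table entry has
	 * BUG_WARNING_TRAP set: check_bug_trap() prints "Badness in ..."
	 * and lets execution continue. */
	WARN_ON(len < 0);

	/* Trap entry without the flag: check_bug_trap() returns 0 and
	 * the kernel oopses via ProgramCheckException(). */
	BUG_ON(ptr == NULL);
}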
+void ProgramCheckException(struct pt_regs *regs)
+{
+	unsigned int reason = get_reason(regs);
+	extern int do_mathemu(struct pt_regs *regs);
+
+#ifdef CONFIG_MATH_EMULATION
+	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
+	 * but there seems to be a hardware bug on the 405GP (RevD)
+	 * that means ESR is sometimes set incorrectly - either to
+	 * ESR_DST (!?) or 0.  We are chasing this with the hardware
+	 * people - it is not yet clear whether it can happen on any
+	 * illegal instruction or only on FP instructions, or whether
+	 * there is a pattern to the occurrences. -dgibson 31/Mar/2003 */
+	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
+		emulate_single_step(regs);
+		return;
+	}
+#endif /* CONFIG_MATH_EMULATION */
+
+	if (reason & REASON_FP) {
+		/* IEEE FP exception */
+		int code = 0;
+		u32 fpscr;
+
+		/* We must make sure the FP state is consistent with
+		 * our MSR_FP in regs
+		 */
+		preempt_disable();
+		if (regs->msr & MSR_FP)
+			giveup_fpu(current);
+		preempt_enable();
+
+		fpscr = current->thread.fpscr;
+		fpscr &= fpscr << 22;	/* mask summary bits with enables */
+		if (fpscr & FPSCR_VX)
+			code = FPE_FLTINV;
+		else if (fpscr & FPSCR_OX)
+			code = FPE_FLTOVF;
+		else if (fpscr & FPSCR_UX)
+			code = FPE_FLTUND;
+		else if (fpscr & FPSCR_ZX)
+			code = FPE_FLTDIV;
+		else if (fpscr & FPSCR_XX)
+			code = FPE_FLTRES;
+		_exception(SIGFPE, regs, code, regs->nip);
+		return;
+	}
+
+	if (reason & REASON_TRAP) {
+		/* trap exception */
+		if (debugger_bpt(regs))
+			return;
+		if (check_bug_trap(regs)) {
+			regs->nip += 4;
+			return;
+		}
+		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
+		return;
+	}
+
+	/* Try to emulate it if we should. */
+	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
+		switch (emulate_instruction(regs)) {
+		case 0:
+			regs->nip += 4;
+			emulate_single_step(regs);
+			return;
+		case -EFAULT:
+			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+			return;
+		}
+	}
+
+	if (reason & REASON_PRIVILEGED)
+		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+	else
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+}
+
+void AlignmentException(struct pt_regs *regs)
+{
+	int fixed;
+
+	fixed = fix_alignment(regs);
+
+	if (fixed == 1) {
+		regs->nip += 4;	/* skip over emulated instruction */
+		emulate_single_step(regs);
+		return;
+	}
+
+	/* Operand address was bad */
+	if (fixed == -EFAULT) {
+		if (user_mode(regs))
+			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
+		else
+			/* Search exception table */
+			bad_page_fault(regs, regs->dar, SIGSEGV);
+		return;
+	}
+	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+}
+
+void StackOverflow(struct pt_regs *regs)
+{
+	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
+	       current, regs->gpr[1]);
+	debugger(regs);
+	show_regs(regs);
+	panic("kernel stack overflow");
+}
+
+void nonrecoverable_exception(struct pt_regs *regs)
+{
+	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
+	       regs->nip, regs->msr);
+	debugger(regs);
+	die("nonrecoverable exception", regs, SIGKILL);
+}
+
+void trace_syscall(struct pt_regs *regs)
+{
+	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
+	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
+	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
+}
+
+#ifdef CONFIG_8xx
+void SoftwareEmulation(struct pt_regs *regs)
+{
+	extern int do_mathemu(struct pt_regs *);
+	extern int Soft_emulate_8xx(struct pt_regs *);
+	int errcode;
+
+	CHECK_FULL_REGS(regs);
+
+	if (!user_mode(regs)) {
+		debugger(regs);
+		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
+	}
+
+#ifdef CONFIG_MATH_EMULATION
+	errcode = do_mathemu(regs);
+#else
+	errcode = Soft_emulate_8xx(regs);
+#endif
+	if (errcode) {
+		if (errcode > 0)
+			_exception(SIGFPE, regs, 0, 0);
+		else if (errcode == -EFAULT)
+			_exception(SIGSEGV, regs, 0, 0);
+		else
+			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+	} else
+		emulate_single_step(regs);
+}
+#endif /* CONFIG_8xx */
+
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+
+void DebugException(struct pt_regs *regs, unsigned long debug_status)
+{
+	if (debug_status & DBSR_IC) {	/* instruction completion */
+		regs->msr &= ~MSR_DE;
+		if (user_mode(regs)) {
+			current->thread.dbcr0 &= ~DBCR0_IC;
+		} else {
+			/* Disable instruction completion */
+			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
+			/* Clear the instruction completion event */
+			mtspr(SPRN_DBSR, DBSR_IC);
+			if (debugger_sstep(regs))
+				return;
+		}
+		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
+	}
+}
+#endif /* CONFIG_40x || CONFIG_BOOKE */
+
+#if !defined(CONFIG_TAU_INT)
+void TAUException(struct pt_regs *regs)
+{
+	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
+	       regs->nip, regs->msr, regs->trap, print_tainted());
+}
+#endif /* CONFIG_TAU_INT */
+
+void AltivecUnavailException(struct pt_regs *regs)
+{
+	static int kernel_altivec_count;
+
+#ifndef CONFIG_ALTIVEC
+	if (user_mode(regs)) {
+		/* A user program has executed an altivec instruction,
+		   but this kernel doesn't support altivec. */
+		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+		return;
+	}
+#endif
+	/* The kernel has executed an altivec instruction without
+	   first enabling altivec.  Whinge but let it do it. */
+	if (++kernel_altivec_count < 10)
+		printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n",
+		       current, regs->nip);
+	regs->msr |= MSR_VEC;
+}
+
+#ifdef CONFIG_ALTIVEC
+void AltivecAssistException(struct pt_regs *regs)
+{
+	int err;
+
+	preempt_disable();
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	preempt_enable();
+	if (!user_mode(regs)) {
+		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
+		       " at %lx\n", regs->nip);
+		die("Kernel Altivec assist exception", regs, SIGILL);
+	}
+
+	err = emulate_altivec(regs);
+	if (err == 0) {
+		regs->nip += 4;		/* skip emulated instruction */
+		emulate_single_step(regs);
+		return;
+	}
+
+	if (err == -EFAULT) {
+		/* got an error reading the instruction */
+		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+	} else {
+		/* didn't recognize the instruction */
+		/* XXX quick hack for now: set the non-Java bit in the VSCR */
+		if (printk_ratelimit())
+			printk(KERN_ERR "Unrecognized altivec instruction "
+			       "in %s at %lx\n", current->comm, regs->nip);
+		current->thread.vscr.u[3] |= 0x10000;
+	}
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_E500
+void PerformanceMonitorException(struct pt_regs *regs)
+{
+	perf_irq(regs);
+}
+#endif
+
+#ifdef CONFIG_FSL_BOOKE
+void CacheLockingException(struct pt_regs *regs, unsigned long address,
+			   unsigned long error_code)
+{
+	/* We treat cache locking instructions from the user
+	 * as priv ops, in the future we could try to do
+	 * something smarter
+	 */
+	if (error_code & (ESR_DLK|ESR_ILK))
+		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
+	return;
+}
+#endif /* CONFIG_FSL_BOOKE */
+
+#ifdef CONFIG_SPE
+void SPEFloatingPointException(struct pt_regs *regs)
+{
+	unsigned long spefscr;
+	int fpexc_mode;
+	int code = 0;
+
+	spefscr = current->thread.spefscr;
+	fpexc_mode = current->thread.fpexc_mode;
+
+	/* Hardware does not necessarily set sticky
+	 * underflow/overflow/invalid flags */
+	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
+		code = FPE_FLTOVF;
+		spefscr |= SPEFSCR_FOVFS;
+	}
+	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
+		code = FPE_FLTUND;
+		spefscr |= SPEFSCR_FUNFS;
+	}
+	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
+		code = FPE_FLTDIV;
+	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
+		code = FPE_FLTINV;
+		spefscr |= SPEFSCR_FINVS;
+	}
+	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
+		code = FPE_FLTRES;
+
+	current->thread.spefscr = spefscr;
+
+	_exception(SIGFPE, regs, code, regs->nip);
+	return;
+}
+#endif
+
+#ifdef CONFIG_BOOKE_WDT
+/*
+ * Default handler for a Watchdog exception,
+ * spins until a reboot occurs
+ */
+void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
+{
+	/* Generic WatchdogHandler, implement your own */
+	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
+	return;
+}
+
+void WatchdogException(struct pt_regs *regs)
+{
+	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
+	WatchdogHandler(regs);
+}
+#endif
+
+void __init trap_init(void)
+{
+}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
similarity index 100%
rename from arch/ppc/kernel/vecemu.c
rename to arch/powerpc/kernel/vecemu.c
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
new file mode 100644
index 0000000..12cb90bc
--- /dev/null
+++ b/arch/powerpc/kernel/vector.S
@@ -0,0 +1,197 @@
+#include <linux/config.h>
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+/*
+ * The routines below are in assembler so we can closely control the
+ * usage of floating-point registers.  These routines must be called
+ * with preempt disabled.
+ */
+#ifdef CONFIG_PPC32
+	.data
+fpzero:
+	.long	0
+fpone:
+	.long	0x3f800000	/* 1.0 in single-precision FP */
+fphalf:
+	.long	0x3f000000	/* 0.5 in single-precision FP */
+
+#define LDCONST(fr, name)	\
+	lis	r11,name@ha;	\
+	lfs	fr,name@l(r11)
+#else
+
+	.section ".toc","aw"
+fpzero:
+	.tc	FD_0_0[TC],0
+fpone:
+	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
+fphalf:
+	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */
+
+#define LDCONST(fr, name)	\
+	lfd	fr,name@toc(r2)
+#endif
+
+	.text
+/*
+ * Internal routine to enable floating point and set FPSCR to 0.
+ * Don't call it from C; it doesn't use the normal calling convention.
+ */
+fpenable:
+#ifdef CONFIG_PPC32
+	stwu	r1,-64(r1)
+#else
+	stdu	r1,-64(r1)
+#endif
+	mfmsr	r10
+	ori	r11,r10,MSR_FP
+	mtmsr	r11
+	isync
+	stfd	fr0,24(r1)
+	stfd	fr1,16(r1)
+	stfd	fr31,8(r1)
+	LDCONST(fr1, fpzero)
+	mffs	fr31
+	mtfsf	0xff,fr1
+	blr
+
+fpdisable:
+	mtlr	r12
+	mtfsf	0xff,fr31
+	lfd	fr31,8(r1)
+	lfd	fr1,16(r1)
+	lfd	fr0,24(r1)
+	mtmsr	r10
+	isync
+	addi	r1,r1,64
+	blr
+
+/*
+ * Vector add, floating point.
+ */
+_GLOBAL(vaddfp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	lfsx	fr1,r5,r6
+	fadds	fr0,fr0,fr1
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
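What vaddfp computes, written out in plain C for illustration; the assembly form exists only to control exactly which FPRs are touched and to run with a zeroed FPSCR, as the comments above explain:

/* Element-wise single-precision add of two 4-element vectors. */
static void vaddfp_sketch(float *dst, const float *a, const float *b)
{
	int i;

	for (i = 0; i < 4; i++)
		dst[i] = a[i] + b[i];
}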
+/*
+ * Vector subtract, floating point.
+ */
+_GLOBAL(vsubfp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	lfsx	fr1,r5,r6
+	fsubs	fr0,fr0,fr1
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
+/*
+ * Vector multiply and add, floating point.
+ */
+_GLOBAL(vmaddfp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	li	r0,4
+	mtctr	r0
+	li	r7,0
+1:	lfsx	fr0,r4,r7
+	lfsx	fr1,r5,r7
+	lfsx	fr2,r6,r7
+	fmadds	fr0,fr0,fr2,fr1
+	stfsx	fr0,r3,r7
+	addi	r7,r7,4
+	bdnz	1b
+	lfd	fr2,32(r1)
+	b	fpdisable
+
+/*
+ * Vector negative multiply and subtract, floating point.
+ */
+_GLOBAL(vnmsubfp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	li	r0,4
+	mtctr	r0
+	li	r7,0
+1:	lfsx	fr0,r4,r7
+	lfsx	fr1,r5,r7
+	lfsx	fr2,r6,r7
+	fnmsubs	fr0,fr0,fr2,fr1
+	stfsx	fr0,r3,r7
+	addi	r7,r7,4
+	bdnz	1b
+	lfd	fr2,32(r1)
+	b	fpdisable
+
+/*
+ * Vector reciprocal estimate.  We just compute 1.0/x.
+ * r3 -> destination, r4 -> source.
+ */
+_GLOBAL(vrefp)
+	mflr	r12
+	bl	fpenable
+	li	r0,4
+	LDCONST(fr1, fpone)
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	fdivs	fr0,fr1,fr0
+	stfsx	fr0,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	b	fpdisable
+
+/*
+ * Vector reciprocal square-root estimate, floating point.
+ * We use the frsqrte instruction for the initial estimate followed
+ * by 2 iterations of Newton-Raphson to get sufficient accuracy.
+ * r3 -> destination, r4 -> source.
+ */
+_GLOBAL(vrsqrtefp)
+	mflr	r12
+	bl	fpenable
+	stfd	fr2,32(r1)
+	stfd	fr3,40(r1)
+	stfd	fr4,48(r1)
+	stfd	fr5,56(r1)
+	li	r0,4
+	LDCONST(fr4, fpone)
+	LDCONST(fr5, fphalf)
+	mtctr	r0
+	li	r6,0
+1:	lfsx	fr0,r4,r6
+	frsqrte	fr1,fr0		/* r = frsqrte(s) */
+	fmuls	fr3,fr1,fr0	/* r * s */
+	fmuls	fr2,fr1,fr5	/* r * 0.5 */
+	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
+	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
+	fmuls	fr3,fr1,fr0	/* r * s */
+	fmuls	fr2,fr1,fr5	/* r * 0.5 */
+	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
+	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
+	stfsx	fr1,r3,r6
+	addi	r6,r6,4
+	bdnz	1b
+	lfd	fr5,56(r1)
+	lfd	fr4,48(r1)
+	lfd	fr3,40(r1)
+	lfd	fr2,32(r1)
+	b	fpdisable
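The update r' = r + 0.5*r*(1 - s*r*r) above is the standard Newton-Raphson step for refining r toward 1/sqrt(s); a standalone C sketch of one element, using an arbitrary software seed where the hardware uses frsqrte:

#include <stdio.h>

static float rsqrte_sketch(float s, float seed)
{
	float r = seed;
	int i;

	/* two Newton-Raphson refinements, as in the assembly above */
	for (i = 0; i < 2; i++)
		r = r + 0.5f * r * (1.0f - s * r * r);
	return r;
}

int main(void)
{
	/* 0.5 is a deliberately poor seed for s = 2.0; two steps already
	 * move it close to 1/sqrt(2) ~= 0.7071 */
	printf("%f\n", rsqrte_sketch(2.0f, 0.5f));
	return 0;
}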
diff --git a/arch/powerpc/kernel/vmlinux.lds b/arch/powerpc/kernel/vmlinux.lds
new file mode 100644
index 0000000..d62c288
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds
@@ -0,0 +1,174 @@
+/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
+/* sched.text is aligned to function alignment to ensure we get the same
+ * address even at the second ld pass when generating System.map */
+/* spinlock.text is aligned to function alignment to ensure we get the same
+ * address even at the second ld pass when generating System.map */
+  /* DWARF debug sections.
+		Symbols in the DWARF debugging sections are relative to
+		the beginning of the section so we begin them at 0.  */
+  /* Stabs debugging sections.  */
+OUTPUT_ARCH(powerpc:common)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  . = + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash : { *(.hash) }
+  .dynsym : { *(.dynsym) }
+  .dynstr : { *(.dynstr) }
+  .rel.text : { *(.rel.text) }
+  .rela.text : { *(.rela.text) }
+  .rel.data : { *(.rel.data) }
+  .rela.data : { *(.rela.data) }
+  .rel.rodata : { *(.rel.rodata) }
+  .rela.rodata : { *(.rela.rodata) }
+  .rel.got : { *(.rel.got) }
+  .rela.got : { *(.rela.got) }
+  .rel.ctors : { *(.rel.ctors) }
+  .rela.ctors : { *(.rela.ctors) }
+  .rel.dtors : { *(.rel.dtors) }
+  .rela.dtors : { *(.rela.dtors) }
+  .rel.bss : { *(.rel.bss) }
+  .rela.bss : { *(.rela.bss) }
+  .rel.plt : { *(.rel.plt) }
+  .rela.plt : { *(.rela.plt) }
+/*  .init          : { *(.init)	} =0*/
+  .plt : { *(.plt) }
+  .text :
+  {
+    *(.text)
+    . = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .;
+    . = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .;
+    *(.fixup)
+    *(.got1)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+  .rodata : AT(ADDR(.rodata) - 0) { *(.rodata) *(.rodata.*) *(__vermagic) }
+  .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) }
+  .pci_fixup : AT(ADDR(.pci_fixup) - 0) {
+    __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .;
+    __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .;
+    __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .;
+    __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .;
+  }
+  __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(__ksymtab) __stop___ksymtab = .; }
+  __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(__ksymtab_gpl) __stop___ksymtab_gpl = .; }
+  __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(__kcrctab) __stop___kcrctab = .; }
+  __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(__kcrctab_gpl) __stop___kcrctab_gpl = .; }
+  __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) }
+  __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; }
+  .fini : { *(.fini) } =0
+  .ctors : { *(.ctors) }
+  .dtors : { *(.dtors) }
+  .fixup : { *(.fixup) }
+ __ex_table : {
+  __start___ex_table = .;
+  *(__ex_table)
+  __stop___ex_table = .;
+ }
+ __bug_table : {
+  __start___bug_table = .;
+  *(__bug_table)
+  __stop___bug_table = .;
+ }
+  /* Read-write section, merged into data segment: */
+  . = ALIGN(4096);
+  .data :
+  {
+    *(.data)
+    *(.data1)
+    *(.sdata)
+    *(.sdata2)
+    *(.got.plt) *(.got)
+    *(.dynamic)
+    CONSTRUCTORS
+  }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;
+  PROVIDE (edata = .);
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  __init_begin = .;
+  .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+  }
+  /* .exit.text is discarded at runtime, not link time,
+     to deal with references from __bug_table */
+  .exit.text : { *(.exit.text) }
+  .init.data : {
+    *(.init.data);
+    __vtop_table_begin = .;
+    *(.vtop_fixup);
+    __vtop_table_end = .;
+    __ptov_table_begin = .;
+    *(.ptov_fixup);
+    __ptov_table_end = .;
+  }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+  }
+  __initcall_end = .;
+
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+
+  .security_initcall.init : AT(ADDR(.security_initcall.init) - 0) { __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; }
+
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
+
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+
+  . = ALIGN(4096);
+  __init_end = .;
+
+  . = ALIGN(4096);
+  _sextratext = .;
+  _eextratext = .;
+
+  __bss_start = .;
+  .bss :
+  {
+   *(.sbss) *(.scommon)
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  __bss_stop = .;
+
+  _end = . ;
+  PROVIDE (end = .);
+
+  /* Sections to be discarded. */
+  /DISCARD/ : {
+    *(.exitcall.exit)
+    *(.exit.data)
+  }
+}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..09c6525
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,172 @@
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(powerpc:common)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  . = + SIZEOF_HEADERS;
+  .interp : { *(.interp) }
+  .hash          : { *(.hash)		}
+  .dynsym        : { *(.dynsym)		}
+  .dynstr        : { *(.dynstr)		}
+  .rel.text      : { *(.rel.text)		}
+  .rela.text     : { *(.rela.text) 	}
+  .rel.data      : { *(.rel.data)		}
+  .rela.data     : { *(.rela.data) 	}
+  .rel.rodata    : { *(.rel.rodata) 	}
+  .rela.rodata   : { *(.rela.rodata) 	}
+  .rel.got       : { *(.rel.got)		}
+  .rela.got      : { *(.rela.got)		}
+  .rel.ctors     : { *(.rel.ctors)	}
+  .rela.ctors    : { *(.rela.ctors)	}
+  .rel.dtors     : { *(.rel.dtors)	}
+  .rela.dtors    : { *(.rela.dtors)	}
+  .rel.bss       : { *(.rel.bss)		}
+  .rela.bss      : { *(.rela.bss)		}
+  .rel.plt       : { *(.rel.plt)		}
+  .rela.plt      : { *(.rela.plt)		}
+/*  .init          : { *(.init)	} =0*/
+  .plt : { *(.plt) }
+  .text      :
+  {
+    *(.text)
+    SCHED_TEXT
+    LOCK_TEXT
+    *(.fixup)
+    *(.got1)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+  }
+  _etext = .;
+  PROVIDE (etext = .);
+
+  RODATA
+  .fini      : { *(.fini)    } =0
+  .ctors     : { *(.ctors)   }
+  .dtors     : { *(.dtors)   }
+
+  .fixup   : { *(.fixup) }
+
+	__ex_table : {
+		__start___ex_table = .;
+		*(__ex_table)
+		__stop___ex_table = .;
+	}
+
+	__bug_table : {
+		__start___bug_table = .;
+		*(__bug_table)
+		__stop___bug_table = .;
+	}
+
+  /* Read-write section, merged into data segment: */
+  . = ALIGN(4096);
+  .data    :
+  {
+    *(.data)
+    *(.data1)
+    *(.sdata)
+    *(.sdata2)
+    *(.got.plt) *(.got)
+    *(.dynamic)
+    CONSTRUCTORS
+  }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata  =  .;
+  PROVIDE (edata = .);
+
+  . = ALIGN(8192);
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  __init_begin = .;
+  .init.text : {
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  /* .exit.text is discarded at runtime, not link time,
+     to deal with references from __bug_table */
+  .exit.text : { *(.exit.text) }
+  .init.data : {
+    *(.init.data);
+    __vtop_table_begin = .;
+    *(.vtop_fixup);
+    __vtop_table_end = .;
+    __ptov_table_begin = .;
+    *(.ptov_fixup);
+    __ptov_table_end = .;
+  }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+	*(.initcall1.init)
+	*(.initcall2.init)
+	*(.initcall3.init)
+	*(.initcall4.init)
+	*(.initcall5.init)
+	*(.initcall6.init)
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+
+  SECURITY_INIT
+
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+
+  . = ALIGN(4096);
+  __init_end = .;
+
+  . = ALIGN(4096);
+  _sextratext = .;
+  _eextratext = .;
+
+  __bss_start = .;
+  .bss       :
+  {
+   *(.sbss) *(.scommon)
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  __bss_stop = .;
+
+  _end = . ;
+  PROVIDE (end = .);
+
+  /* Sections to be discarded. */
+  /DISCARD/ : {
+    *(.exitcall.exit)
+    *(.exit.data)
+  }
+}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
new file mode 100644
index 0000000..347f979
--- /dev/null
+++ b/arch/powerpc/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for ppc-specific library files..
+#
+
+obj-y			:= strcase.o string.o
+obj-$(CONFIG_PPC32)	+= div64.o copy32.o checksum.o
+obj-$(CONFIG_PPC64)	+= copypage.o copyuser.o memcpy.o usercopy.o \
+			   sstep.o checksum64.o
+obj-$(CONFIG_PPC_ISERIES) += e2a.o
diff --git a/arch/powerpc/lib/checksum.S b/arch/powerpc/lib/checksum.S
new file mode 100644
index 0000000..7874e8a
--- /dev/null
+++ b/arch/powerpc/lib/checksum.S
@@ -0,0 +1,225 @@
+/*
+ * This file contains assembly-language implementations
+ * of IP-style 1's complement checksum routines.
+ *	
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+#include <linux/sys.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+	.text
+
+/*
+ * ip_fast_csum(buf, len) -- Optimized for IP header
+ * len is in words and is always >= 5.
+ */
+_GLOBAL(ip_fast_csum)
+	lwz	r0,0(r3)
+	lwzu	r5,4(r3)
+	addic.	r4,r4,-2
+	addc	r0,r0,r5
+	mtctr	r4
+	blelr-
+1:	lwzu	r4,4(r3)
+	adde	r0,r0,r4
+	bdnz	1b
+	addze	r0,r0		/* add in final carry */
+	rlwinm	r3,r0,16,0,31	/* fold two halves together */
+	add	r3,r0,r3
+	not	r3,r3
+	srwi	r3,r3,16
+	blr
+
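A portable C sketch of what the routine above computes: a one's complement sum with end-around carry, folded to 16 bits and inverted (the header words in main() are arbitrary values, used only to demonstrate a call):

#include <stdint.h>
#include <stdio.h>

static uint16_t ip_fast_csum_sketch(const uint32_t *buf, unsigned int words)
{
	uint64_t sum = 0;
	unsigned int i;

	for (i = 0; i < words; i++)
		sum += buf[i];
	while (sum >> 16)			/* fold end-around carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t hdr[5] = { 0x45000054, 0x00004000, 0x40010000,
			    0x0a000001, 0x0a000002 };

	printf("%#06x\n", ip_fast_csum_sketch(hdr, 5));
	return 0;
}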
+/*
+ * Compute checksum of TCP or UDP pseudo-header:
+ *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
+ */	
+_GLOBAL(csum_tcpudp_magic)
+	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
+	addc	r0,r3,r4	/* add 4 32-bit words together */
+	adde	r0,r0,r5
+	adde	r0,r0,r7
+	addze	r0,r0		/* add in final carry */
+	rlwinm	r3,r0,16,0,31	/* fold two halves together */
+	add	r3,r0,r3
+	not	r3,r3
+	srwi	r3,r3,16
+	blr
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * csum_partial(buff, len, sum)
+ */
+_GLOBAL(csum_partial)
+	addic	r0,r5,0
+	subi	r3,r3,4
+	srwi.	r6,r4,2
+	beq	3f		/* if we're doing < 4 bytes */
+	andi.	r5,r3,2		/* Align buffer to longword boundary */
+	beq+	1f
+	lhz	r5,4(r3)	/* do 2 bytes to get aligned */
+	addi	r3,r3,2
+	subi	r4,r4,2
+	addc	r0,r0,r5
+	srwi.	r6,r4,2		/* # words to do */
+	beq	3f
+1:	mtctr	r6
+2:	lwzu	r5,4(r3)	/* the bdnz has zero overhead, so it should */
+	adde	r0,r0,r5	/* be unnecessary to unroll this loop */
+	bdnz	2b
+	andi.	r4,r4,3
+3:	cmpwi	0,r4,2
+	blt+	4f
+	lhz	r5,4(r3)
+	addi	r3,r3,2
+	subi	r4,r4,2
+	adde	r0,r0,r5
+4:	cmpwi	0,r4,1
+	bne+	5f
+	lbz	r5,4(r3)
+	slwi	r5,r5,8		/* Upper byte of word */
+	adde	r0,r0,r5
+5:	addze	r3,r0		/* add in final carry */
+	blr
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively, and (for an error on
+ * src) zeroes the rest of dst.
+ *
+ * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ */
+_GLOBAL(csum_partial_copy_generic)
+	addic	r0,r6,0
+	subi	r3,r3,4
+	subi	r4,r4,4
+	srwi.	r6,r5,2
+	beq	3f		/* if we're doing < 4 bytes */
+	andi.	r9,r4,2		/* Align dst to longword boundary */
+	beq+	1f
+81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
+	addi	r3,r3,2
+	subi	r5,r5,2
+91:	sth	r6,4(r4)
+	addi	r4,r4,2
+	addc	r0,r0,r6
+	srwi.	r6,r5,2		/* # words to do */
+	beq	3f
+1:	srwi.	r6,r5,4		/* # groups of 4 words to do */
+	beq	10f
+	mtctr	r6
+71:	lwz	r6,4(r3)
+72:	lwz	r9,8(r3)
+73:	lwz	r10,12(r3)
+74:	lwzu	r11,16(r3)
+	adde	r0,r0,r6
+75:	stw	r6,4(r4)
+	adde	r0,r0,r9
+76:	stw	r9,8(r4)
+	adde	r0,r0,r10
+77:	stw	r10,12(r4)
+	adde	r0,r0,r11
+78:	stwu	r11,16(r4)
+	bdnz	71b
+10:	rlwinm.	r6,r5,30,30,31	/* # words left to do */
+	beq	13f
+	mtctr	r6
+82:	lwzu	r9,4(r3)
+92:	stwu	r9,4(r4)
+	adde	r0,r0,r9
+	bdnz	82b
+13:	andi.	r5,r5,3
+3:	cmpwi	0,r5,2
+	blt+	4f
+83:	lhz	r6,4(r3)
+	addi	r3,r3,2
+	subi	r5,r5,2
+93:	sth	r6,4(r4)
+	addi	r4,r4,2
+	adde	r0,r0,r6
+4:	cmpwi	0,r5,1
+	bne+	5f
+84:	lbz	r6,4(r3)
+94:	stb	r6,4(r4)
+	slwi	r6,r6,8		/* Upper byte of word */
+	adde	r0,r0,r6
+5:	addze	r3,r0		/* add in final carry */
+	blr
+
+/* These shouldn't go in the fixup section, since that would
+   cause the ex_table addresses to get out of order. */
+
+src_error_4:
+	mfctr	r6		/* update # bytes remaining from ctr */
+	rlwimi	r5,r6,4,0,27
+	b	79f
+src_error_1:
+	li	r6,0
+	subi	r5,r5,2
+95:	sth	r6,4(r4)
+	addi	r4,r4,2
+79:	srwi.	r6,r5,2
+	beq	3f
+	mtctr	r6
+src_error_2:
+	li	r6,0
+96:	stwu	r6,4(r4)
+	bdnz	96b
+3:	andi.	r5,r5,3
+	beq	src_error
+src_error_3:
+	li	r6,0
+	mtctr	r5
+	addi	r4,r4,3
+97:	stbu	r6,1(r4)
+	bdnz	97b
+src_error:
+	cmpwi	0,r7,0
+	beq	1f
+	li	r6,-EFAULT
+	stw	r6,0(r7)
+1:	addze	r3,r0
+	blr
+
+dst_error:
+	cmpwi	0,r8,0
+	beq	1f
+	li	r6,-EFAULT
+	stw	r6,0(r8)
+1:	addze	r3,r0
+	blr
+
+.section __ex_table,"a"
+	.long	81b,src_error_1
+	.long	91b,dst_error
+	.long	71b,src_error_4
+	.long	72b,src_error_4
+	.long	73b,src_error_4
+	.long	74b,src_error_4
+	.long	75b,dst_error
+	.long	76b,dst_error
+	.long	77b,dst_error
+	.long	78b,dst_error
+	.long	82b,src_error_2
+	.long	92b,dst_error
+	.long	83b,src_error_3
+	.long	93b,dst_error
+	.long	84b,src_error_3
+	.long	94b,dst_error
+	.long	95b,dst_error
+	.long	96b,dst_error
+	.long	97b,dst_error
diff --git a/arch/powerpc/lib/checksum64.S b/arch/powerpc/lib/checksum64.S
new file mode 100644
index 0000000..ef96c6c
--- /dev/null
+++ b/arch/powerpc/lib/checksum64.S
@@ -0,0 +1,229 @@
+/*
+ * This file contains assembly-language implementations
+ * of IP-style 1's complement checksum routines.
+ *	
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
+ */
+
+#include <linux/sys.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
+ * len is in words and is always >= 5.
+ *
+ * In practice len == 5, but this is not guaranteed.  So this code does not
+ * attempt to use doubleword instructions.
+ */
+_GLOBAL(ip_fast_csum)
+	lwz	r0,0(r3)
+	lwzu	r5,4(r3)
+	addic.	r4,r4,-2
+	addc	r0,r0,r5
+	mtctr	r4
+	blelr-
+1:	lwzu	r4,4(r3)
+	adde	r0,r0,r4
+	bdnz	1b
+	addze	r0,r0		/* add in final carry */
+        rldicl  r4,r0,32,0      /* fold two 32-bit halves together */
+        add     r0,r0,r4
+        srdi    r0,r0,32
+	rlwinm	r3,r0,16,0,31	/* fold two halves together */
+	add	r3,r0,r3
+	not	r3,r3
+	srwi	r3,r3,16
+	blr
+
+/*
+ * Compute checksum of TCP or UDP pseudo-header:
+ *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
+ * No real gain trying to do this specially for 64 bit, but
+ * the 32 bit addition may spill into the upper bits of
+ * the doubleword so we still must fold it down from 64.
+ */	
+_GLOBAL(csum_tcpudp_magic)
+	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
+	addc	r0,r3,r4	/* add 4 32-bit words together */
+	adde	r0,r0,r5
+	adde	r0,r0,r7
+        rldicl  r4,r0,32,0      /* fold 64 bit value */
+        add     r0,r4,r0
+        srdi    r0,r0,32
+	rlwinm	r3,r0,16,0,31	/* fold two halves together */
+	add	r3,r0,r3
+	not	r3,r3
+	srwi	r3,r3,16
+	blr
+
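A hedged C model of the pseudo-header sum computed above (the name is made up; on a big-endian machine the fields combine exactly as the rlwimi does):

static unsigned short csum_tcpudp_model(unsigned int saddr, unsigned int daddr,
					unsigned short len, unsigned short proto,
					unsigned int sum)
{
	unsigned long long s = sum;

	s += saddr;
	s += daddr;
	s += ((unsigned int)proto << 16) | len;	/* the rlwimi packing */
	s = (s & 0xffffffffULL) + (s >> 32);	/* fold 64 -> 32, as rldicl/srdi */
	while (s >> 16)				/* fold 32 -> 16 */
		s = (s & 0xffff) + (s >> 16);
	return (unsigned short)~s;		/* complement, as the not/srwi */
}
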
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit).
+ *
+ * This code assumes at least halfword alignment, though the length
+ * can be any number of bytes.  The sum is accumulated in r5.
+ *
+ * csum_partial(r3=buff, r4=len, r5=sum)
+ */
+_GLOBAL(csum_partial)
+        subi	r3,r3,8		/* we'll offset by 8 for the loads */
+        srdi.	r6,r4,3         /* divide by 8 for doubleword count */
+        addic   r5,r5,0         /* clear carry */
+        beq	3f              /* if we're doing < 8 bytes */
+        andi.	r0,r3,2         /* aligned on a word boundary already? */
+        beq+	1f
+        lhz     r6,8(r3)        /* do 2 bytes to get aligned */
+        addi    r3,r3,2
+        subi    r4,r4,2
+        addc    r5,r5,r6
+        srdi.   r6,r4,3         /* recompute number of doublewords */
+        beq     3f              /* any left? */
+1:      mtctr   r6
+2:      ldu     r6,8(r3)        /* main sum loop */
+        adde    r5,r5,r6
+        bdnz    2b
+        andi.	r4,r4,7         /* compute bytes left to sum after doublewords */
+3:	cmpwi	0,r4,4		/* is at least a full word left? */
+	blt	4f
+	lwz	r6,8(r3)	/* sum this word */
+	addi	r3,r3,4
+	subi	r4,r4,4
+	adde	r5,r5,r6
+4:	cmpwi	0,r4,2		/* is at least a halfword left? */
+        blt+	5f
+        lhz     r6,8(r3)        /* sum this halfword */
+        addi    r3,r3,2
+        subi    r4,r4,2
+        adde    r5,r5,r6
+5:	cmpwi	0,r4,1		/* is at least a byte left? */
+        bne+    6f
+        lbz     r6,8(r3)        /* sum this byte */
+        slwi    r6,r6,8         /* this byte is assumed to be the upper byte of a halfword */
+        adde    r5,r5,r6
+6:      addze	r5,r5		/* add in final carry */
+	rldicl  r4,r5,32,0      /* fold two 32-bit halves together */
+        add     r3,r4,r5
+        srdi    r3,r3,32
+        blr
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively, and (for an error on
+ * src) zeroes the rest of dst.
+ *
+ * This code needs to be reworked to take advantage of 64 bit sum+copy.
+ * However, due to token-ring halfword alignment problems this will be very

+ * tricky.  For now we'll leave it until we instrument it somehow.
+ *
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ */
+_GLOBAL(csum_partial_copy_generic)
+	addic	r0,r6,0
+	subi	r3,r3,4
+	subi	r4,r4,4
+	srwi.	r6,r5,2
+	beq	3f		/* if we're doing < 4 bytes */
+	andi.	r9,r4,2		/* Align dst to longword boundary */
+	beq+	1f
+81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
+	addi	r3,r3,2
+	subi	r5,r5,2
+91:	sth	r6,4(r4)
+	addi	r4,r4,2
+	addc	r0,r0,r6
+	srwi.	r6,r5,2		/* # words to do */
+	beq	3f
+1:	mtctr	r6
+82:	lwzu	r6,4(r3)	/* the bdnz has zero overhead, so it should */
+92:	stwu	r6,4(r4)	/* be unnecessary to unroll this loop */
+	adde	r0,r0,r6
+	bdnz	82b
+	andi.	r5,r5,3
+3:	cmpwi	0,r5,2
+	blt+	4f
+83:	lhz	r6,4(r3)
+	addi	r3,r3,2
+	subi	r5,r5,2
+93:	sth	r6,4(r4)
+	addi	r4,r4,2
+	adde	r0,r0,r6
+4:	cmpwi	0,r5,1
+	bne+	5f
+84:	lbz	r6,4(r3)
+94:	stb	r6,4(r4)
+	slwi	r6,r6,8		/* Upper byte of word */
+	adde	r0,r0,r6
+5:	addze	r3,r0		/* add in final carry (unlikely with 64-bit regs) */
+        rldicl  r4,r3,32,0      /* fold 64 bit value */
+        add     r3,r4,r3
+        srdi    r3,r3,32
+	blr
+
+/* These shouldn't go in the fixup section, since that would
+   cause the ex_table addresses to get out of order. */
+
+	.globl src_error_1
+src_error_1:
+	li	r6,0
+	subi	r5,r5,2
+95:	sth	r6,4(r4)
+	addi	r4,r4,2
+	srwi.	r6,r5,2
+	beq	3f
+	mtctr	r6
+	.globl src_error_2
+src_error_2:
+	li	r6,0
+96:	stwu	r6,4(r4)
+	bdnz	96b
+3:	andi.	r5,r5,3
+	beq	src_error
+	.globl src_error_3
+src_error_3:
+	li	r6,0
+	mtctr	r5
+	addi	r4,r4,3
+97:	stbu	r6,1(r4)
+	bdnz	97b
+	.globl src_error
+src_error:
+	cmpdi	0,r7,0
+	beq	1f
+	li	r6,-EFAULT
+	stw	r6,0(r7)
+1:	addze	r3,r0
+	blr
+
+	.globl dst_error
+dst_error:
+	cmpdi	0,r8,0
+	beq	1f
+	li	r6,-EFAULT
+	stw	r6,0(r8)
+1:	addze	r3,r0
+	blr
+
+.section __ex_table,"a"
+	.align  3
+	.llong	81b,src_error_1
+	.llong	91b,dst_error
+	.llong	82b,src_error_2
+	.llong	92b,dst_error
+	.llong	83b,src_error_3
+	.llong	93b,dst_error
+	.llong	84b,src_error_3
+	.llong	94b,dst_error
+	.llong	95b,dst_error
+	.llong	96b,dst_error
+	.llong	97b,dst_error
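
Similarly, the 64-bit csum_partial above is, up to unrolling and the doubleword main loop, equivalent to this hedged C model (name invented; halfword alignment assumed, and the 32-bit result still needs a final 16-bit fold before use as an Internet checksum):

static unsigned int csum_partial_model(const unsigned char *buf, int len,
				       unsigned int sum)
{
	unsigned long long s = sum;

	for (; len >= 2; buf += 2, len -= 2)
		s += *(const unsigned short *)buf;
	if (len)
		s += (unsigned int)*buf << 8;	/* odd trailing byte */
	while (s >> 32)
		s = (s & 0xffffffffULL) + (s >> 32);
	return (unsigned int)s;
}
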
diff --git a/arch/powerpc/lib/copy32.S b/arch/powerpc/lib/copy32.S
new file mode 100644
index 0000000..420a912
--- /dev/null
+++ b/arch/powerpc/lib/copy32.S
@@ -0,0 +1,543 @@
+/*
+ * Memory copy functions for 32-bit PowerPC.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+#define COPY_16_BYTES		\
+	lwz	r7,4(r4);	\
+	lwz	r8,8(r4);	\
+	lwz	r9,12(r4);	\
+	lwzu	r10,16(r4);	\
+	stw	r7,4(r6);	\
+	stw	r8,8(r6);	\
+	stw	r9,12(r6);	\
+	stwu	r10,16(r6)
+
+#define COPY_16_BYTES_WITHEX(n)	\
+8 ## n ## 0:			\
+	lwz	r7,4(r4);	\
+8 ## n ## 1:			\
+	lwz	r8,8(r4);	\
+8 ## n ## 2:			\
+	lwz	r9,12(r4);	\
+8 ## n ## 3:			\
+	lwzu	r10,16(r4);	\
+8 ## n ## 4:			\
+	stw	r7,4(r6);	\
+8 ## n ## 5:			\
+	stw	r8,8(r6);	\
+8 ## n ## 6:			\
+	stw	r9,12(r6);	\
+8 ## n ## 7:			\
+	stwu	r10,16(r6)
+
+#define COPY_16_BYTES_EXCODE(n)			\
+9 ## n ## 0:					\
+	addi	r5,r5,-(16 * n);		\
+	b	104f;				\
+9 ## n ## 1:					\
+	addi	r5,r5,-(16 * n);		\
+	b	105f;				\
+.section __ex_table,"a";			\
+	.align	2;				\
+	.long	8 ## n ## 0b,9 ## n ## 0b;	\
+	.long	8 ## n ## 1b,9 ## n ## 0b;	\
+	.long	8 ## n ## 2b,9 ## n ## 0b;	\
+	.long	8 ## n ## 3b,9 ## n ## 0b;	\
+	.long	8 ## n ## 4b,9 ## n ## 1b;	\
+	.long	8 ## n ## 5b,9 ## n ## 1b;	\
+	.long	8 ## n ## 6b,9 ## n ## 1b;	\
+	.long	8 ## n ## 7b,9 ## n ## 1b;	\
+	.text
+
+	.text
+	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
+	.stabs	"copy32.S",N_SO,0,0,0f
+0:
+
+CACHELINE_BYTES = L1_CACHE_LINE_SIZE
+LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE
+CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1)
+
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero.  This requires that the destination
+ * area is cacheable.  -- paulus
+ */
+_GLOBAL(cacheable_memzero)
+	mr	r5,r4
+	li	r4,0
+	addi	r6,r3,-4
+	cmplwi	0,r5,4
+	blt	7f
+	stwu	r4,4(r6)
+	beqlr
+	andi.	r0,r6,3
+	add	r5,r0,r5
+	subf	r6,r0,r6
+	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
+	add	r8,r7,r5
+	srwi	r9,r8,LG_CACHELINE_BYTES
+	addic.	r9,r9,-1	/* total number of complete cachelines */
+	ble	2f
+	xori	r0,r7,CACHELINE_MASK & ~3
+	srwi.	r0,r0,2
+	beq	3f
+	mtctr	r0
+4:	stwu	r4,4(r6)
+	bdnz	4b
+3:	mtctr	r9
+	li	r7,4
+#if !defined(CONFIG_8xx)
+10:	dcbz	r7,r6
+#else
+10:	stw	r4, 4(r6)
+	stw	r4, 8(r6)
+	stw	r4, 12(r6)
+	stw	r4, 16(r6)
+#if CACHE_LINE_SIZE >= 32
+	stw	r4, 20(r6)
+	stw	r4, 24(r6)
+	stw	r4, 28(r6)
+	stw	r4, 32(r6)
+#endif /* CACHE_LINE_SIZE */
+#endif
+	addi	r6,r6,CACHELINE_BYTES
+	bdnz	10b
+	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
+	addi	r5,r5,4
+2:	srwi	r0,r5,2
+	mtctr	r0
+	bdz	6f
+1:	stwu	r4,4(r6)
+	bdnz	1b
+6:	andi.	r5,r5,3
+7:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r6,r6,3
+8:	stbu	r4,1(r6)
+	bdnz	8b
+	blr
+
+_GLOBAL(memset)
+	rlwimi	r4,r4,8,16,23
+	rlwimi	r4,r4,16,0,15
+	addi	r6,r3,-4
+	cmplwi	0,r5,4
+	blt	7f
+	stwu	r4,4(r6)
+	beqlr
+	andi.	r0,r6,3
+	add	r5,r0,r5
+	subf	r6,r0,r6
+	srwi	r0,r5,2
+	mtctr	r0
+	bdz	6f
+1:	stwu	r4,4(r6)
+	bdnz	1b
+6:	andi.	r5,r5,3
+7:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r6,r6,3
+8:	stbu	r4,1(r6)
+	bdnz	8b
+	blr
+
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic.  This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
+_GLOBAL(cacheable_memcpy)
+	add	r7,r3,r5		/* test if the src & dst overlap */
+	add	r8,r4,r5
+	cmplw	0,r4,r7
+	cmplw	1,r3,r8
+	crand	0,0,4			/* cr0.lt &= cr1.lt */
+	blt	memcpy			/* if regions overlap */
+
+	addi	r4,r4,-4
+	addi	r6,r3,-4
+	neg	r0,r3
+	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
+	beq	58f
+
+	cmplw	0,r5,r0			/* is this more than total to do? */
+	blt	63f			/* if not much to do */
+	andi.	r8,r0,3			/* get it word-aligned first */
+	subf	r5,r0,r5
+	mtctr	r8
+	beq+	61f
+70:	lbz	r9,4(r4)		/* do some bytes */
+	stb	r9,4(r6)
+	addi	r4,r4,1
+	addi	r6,r6,1
+	bdnz	70b
+61:	srwi.	r0,r0,2
+	mtctr	r0
+	beq	58f
+72:	lwzu	r9,4(r4)		/* do some words */
+	stwu	r9,4(r6)
+	bdnz	72b
+
+58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
+	li	r11,4
+	mtctr	r0
+	beq	63f
+53:
+#if !defined(CONFIG_8xx)
+	dcbz	r11,r6
+#endif
+	COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES
+	COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+#endif
+#endif
+#endif
+	bdnz	53b
+
+63:	srwi.	r0,r5,2
+	mtctr	r0
+	beq	64f
+30:	lwzu	r0,4(r4)
+	stwu	r0,4(r6)
+	bdnz	30b
+
+64:	andi.	r0,r5,3
+	mtctr	r0
+	beq+	65f
+40:	lbz	r0,4(r4)
+	stb	r0,4(r6)
+	addi	r4,r4,1
+	addi	r6,r6,1
+	bdnz	40b
+65:	blr
+
+_GLOBAL(memmove)
+	cmplw	0,r3,r4
+	bgt	backwards_memcpy
+	/* fall through */
+
+_GLOBAL(memcpy)
+	srwi.	r7,r5,3
+	addi	r6,r3,-4
+	addi	r4,r4,-4
+	beq	2f			/* if less than 8 bytes to do */
+	andi.	r0,r6,3			/* get dest word aligned */
+	mtctr	r7
+	bne	5f
+1:	lwz	r7,4(r4)
+	lwzu	r8,8(r4)
+	stw	r7,4(r6)
+	stwu	r8,8(r6)
+	bdnz	1b
+	andi.	r5,r5,7
+2:	cmplwi	0,r5,4
+	blt	3f
+	lwzu	r0,4(r4)
+	addi	r5,r5,-4
+	stwu	r0,4(r6)
+3:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r4,r4,3
+	addi	r6,r6,3
+4:	lbzu	r0,1(r4)
+	stbu	r0,1(r6)
+	bdnz	4b
+	blr
+5:	subfic	r0,r0,4
+	mtctr	r0
+6:	lbz	r7,4(r4)
+	addi	r4,r4,1
+	stb	r7,4(r6)
+	addi	r6,r6,1
+	bdnz	6b
+	subf	r5,r0,r5
+	rlwinm.	r7,r5,32-3,3,31
+	beq	2b
+	mtctr	r7
+	b	1b
+
+_GLOBAL(backwards_memcpy)
+	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
+	add	r6,r3,r5
+	add	r4,r4,r5
+	beq	2f
+	andi.	r0,r6,3
+	mtctr	r7
+	bne	5f
+1:	lwz	r7,-4(r4)
+	lwzu	r8,-8(r4)
+	stw	r7,-4(r6)
+	stwu	r8,-8(r6)
+	bdnz	1b
+	andi.	r5,r5,7
+2:	cmplwi	0,r5,4
+	blt	3f
+	lwzu	r0,-4(r4)
+	subi	r5,r5,4
+	stwu	r0,-4(r6)
+3:	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+4:	lbzu	r0,-1(r4)
+	stbu	r0,-1(r6)
+	bdnz	4b
+	blr
+5:	mtctr	r0
+6:	lbzu	r7,-1(r4)
+	stbu	r7,-1(r6)
+	bdnz	6b
+	subf	r5,r0,r5
+	rlwinm.	r7,r5,32-3,3,31
+	beq	2b
+	mtctr	r7
+	b	1b
+
+_GLOBAL(__copy_tofrom_user)
+	addi	r4,r4,-4
+	addi	r6,r3,-4
+	neg	r0,r3
+	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
+	beq	58f
+
+	cmplw	0,r5,r0			/* is this more than total to do? */
+	blt	63f			/* if not much to do */
+	andi.	r8,r0,3			/* get it word-aligned first */
+	mtctr	r8
+	beq+	61f
+70:	lbz	r9,4(r4)		/* do some bytes */
+71:	stb	r9,4(r6)
+	addi	r4,r4,1
+	addi	r6,r6,1
+	bdnz	70b
+61:	subf	r5,r0,r5
+	srwi.	r0,r0,2
+	mtctr	r0
+	beq	58f
+72:	lwzu	r9,4(r4)		/* do some words */
+73:	stwu	r9,4(r6)
+	bdnz	72b
+
+	.section __ex_table,"a"
+	.align	2
+	.long	70b,100f
+	.long	71b,101f
+	.long	72b,102f
+	.long	73b,103f
+	.text
+
+58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
+	li	r11,4
+	beq	63f
+
+#ifdef CONFIG_8xx
+	/* Don't use prefetch on 8xx */
+	mtctr	r0
+	li	r0,0
+53:	COPY_16_BYTES_WITHEX(0)
+	bdnz	53b
+
+#else /* not CONFIG_8xx */
+	/* Here we decide how far ahead to prefetch the source */
+	li	r3,4
+	cmpwi	r0,1
+	li	r7,0
+	ble	114f
+	li	r7,1
+#if MAX_COPY_PREFETCH > 1
+	/* Heuristically, for large transfers we prefetch
+	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
+	   we prefetch 1 cacheline ahead. */
+	cmpwi	r0,MAX_COPY_PREFETCH
+	ble	112f
+	li	r7,MAX_COPY_PREFETCH
+112:	mtctr	r7
+111:	dcbt	r3,r4
+	addi	r3,r3,CACHELINE_BYTES
+	bdnz	111b
+#else
+	dcbt	r3,r4
+	addi	r3,r3,CACHELINE_BYTES
+#endif /* MAX_COPY_PREFETCH > 1 */
+
+114:	subf	r8,r7,r0
+	mr	r0,r7
+	mtctr	r8
+
+53:	dcbt	r3,r4
+54:	dcbz	r11,r6
+	.section __ex_table,"a"
+	.align	2
+	.long	54b,105f
+	.text
+/* the main body of the cacheline loop */
+	COPY_16_BYTES_WITHEX(0)
+#if L1_CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES_WITHEX(1)
+#if L1_CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES_WITHEX(2)
+	COPY_16_BYTES_WITHEX(3)
+#if L1_CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES_WITHEX(4)
+	COPY_16_BYTES_WITHEX(5)
+	COPY_16_BYTES_WITHEX(6)
+	COPY_16_BYTES_WITHEX(7)
+#endif
+#endif
+#endif
+	bdnz	53b
+	cmpwi	r0,0
+	li	r3,4
+	li	r7,0
+	bne	114b
+#endif /* CONFIG_8xx */
+
+63:	srwi.	r0,r5,2
+	mtctr	r0
+	beq	64f
+30:	lwzu	r0,4(r4)
+31:	stwu	r0,4(r6)
+	bdnz	30b
+
+64:	andi.	r0,r5,3
+	mtctr	r0
+	beq+	65f
+40:	lbz	r0,4(r4)
+41:	stb	r0,4(r6)
+	addi	r4,r4,1
+	addi	r6,r6,1
+	bdnz	40b
+65:	li	r3,0
+	blr
+
+/* read fault, initial single-byte copy */
+100:	li	r9,0
+	b	90f
+/* write fault, initial single-byte copy */
+101:	li	r9,1
+90:	subf	r5,r8,r5
+	li	r3,0
+	b	99f
+/* read fault, initial word copy */
+102:	li	r9,0
+	b	91f
+/* write fault, initial word copy */
+103:	li	r9,1
+91:	li	r3,2
+	b	99f
+
+/*
+ * this stuff handles faults in the cacheline loop and branches to either
+ * 104f (if in read part) or 105f (if in write part), after updating r5
+ */
+	COPY_16_BYTES_EXCODE(0)
+#if L1_CACHE_LINE_SIZE >= 32
+	COPY_16_BYTES_EXCODE(1)
+#if L1_CACHE_LINE_SIZE >= 64
+	COPY_16_BYTES_EXCODE(2)
+	COPY_16_BYTES_EXCODE(3)
+#if L1_CACHE_LINE_SIZE >= 128
+	COPY_16_BYTES_EXCODE(4)
+	COPY_16_BYTES_EXCODE(5)
+	COPY_16_BYTES_EXCODE(6)
+	COPY_16_BYTES_EXCODE(7)
+#endif
+#endif
+#endif
+
+/* read fault in cacheline loop */
+104:	li	r9,0
+	b	92f
+/* fault on dcbz (effectively a write fault) */
+/* or write fault in cacheline loop */
+105:	li	r9,1
+92:	li	r3,LG_CACHELINE_BYTES
+	mfctr	r8
+	add	r0,r0,r8
+	b	106f
+/* read fault in final word loop */
+108:	li	r9,0
+	b	93f
+/* write fault in final word loop */
+109:	li	r9,1
+93:	andi.	r5,r5,3
+	li	r3,2
+	b	99f
+/* read fault in final byte loop */
+110:	li	r9,0
+	b	94f
+/* write fault in final byte loop */
+111:	li	r9,1
+94:	li	r5,0
+	li	r3,0
+/*
+ * At this stage the number of bytes not copied is
+ * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
+ */
+99:	mfctr	r0
+106:	slw	r3,r0,r3
+	add.	r3,r3,r5
+	beq	120f			/* shouldn't happen */
+	cmpwi	0,r9,0
+	bne	120f
+/* for a read fault, first try to continue the copy one byte at a time */
+	mtctr	r3
+130:	lbz	r0,4(r4)
+131:	stb	r0,4(r6)
+	addi	r4,r4,1
+	addi	r6,r6,1
+	bdnz	130b
+/* then clear out the destination: r3 bytes starting at 4(r6) */
+132:	mfctr	r3
+	srwi.	r0,r3,2
+	li	r9,0
+	mtctr	r0
+	beq	113f
+112:	stwu	r9,4(r6)
+	bdnz	112b
+113:	andi.	r0,r3,3
+	mtctr	r0
+	beq	120f
+114:	stb	r9,4(r6)
+	addi	r6,r6,1
+	bdnz	114b
+120:	blr
+
+	.section __ex_table,"a"
+	.align	2
+	.long	30b,108b
+	.long	31b,109b
+	.long	40b,110b
+	.long	41b,111b
+	.long	130b,132b
+	.long	131b,120b
+	.long	112b,120b
+	.long	114b,120b
+	.text
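
The overlap test at the top of cacheable_memcpy above (the cmplw/cmplw/crand sequence) amounts to the following hedged C predicate; dcbz on the destination is only safe when the regions are disjoint, so overlapping copies fall back to plain memcpy:

static int regions_overlap(const char *dst, const char *src,
			   unsigned long n)
{
	return dst < src + n && src < dst + n;
}
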
diff --git a/arch/powerpc/lib/copypage.S b/arch/powerpc/lib/copypage.S
new file mode 100644
index 0000000..733d616
--- /dev/null
+++ b/arch/powerpc/lib/copypage.S
@@ -0,0 +1,121 @@
+/*
+ * arch/ppc64/lib/copypage.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+_GLOBAL(copy_page)
+	std	r31,-8(1)
+	std	r30,-16(1)
+	std	r29,-24(1)
+	std	r28,-32(1)
+	std	r27,-40(1)
+	std	r26,-48(1)
+	std	r25,-56(1)
+	std	r24,-64(1)
+	std	r23,-72(1)
+	std	r22,-80(1)
+	std	r21,-88(1)
+	std	r20,-96(1)
+	li	r5,4096/32 - 1
+	addi	r3,r3,-8
+	li	r12,5
+0:	addi	r5,r5,-24
+	mtctr	r12
+	ld	r22,640(4)
+	ld	r21,512(4)
+	ld	r20,384(4)
+	ld	r11,256(4)
+	ld	r9,128(4)
+	ld	r7,0(4)
+	ld	r25,648(4)
+	ld	r24,520(4)
+	ld	r23,392(4)
+	ld	r10,264(4)
+	ld	r8,136(4)
+	ldu	r6,8(4)
+	cmpwi	r5,24
+1:	std	r22,648(3)
+	std	r21,520(3)
+	std	r20,392(3)
+	std	r11,264(3)
+	std	r9,136(3)
+	std	r7,8(3)
+	ld	r28,648(4)
+	ld	r27,520(4)
+	ld	r26,392(4)
+	ld	r31,264(4)
+	ld	r30,136(4)
+	ld	r29,8(4)
+	std	r25,656(3)
+	std	r24,528(3)
+	std	r23,400(3)
+	std	r10,272(3)
+	std	r8,144(3)
+	std	r6,16(3)
+	ld	r22,656(4)
+	ld	r21,528(4)
+	ld	r20,400(4)
+	ld	r11,272(4)
+	ld	r9,144(4)
+	ld	r7,16(4)
+	std	r28,664(3)
+	std	r27,536(3)
+	std	r26,408(3)
+	std	r31,280(3)
+	std	r30,152(3)
+	stdu	r29,24(3)
+	ld	r25,664(4)
+	ld	r24,536(4)
+	ld	r23,408(4)
+	ld	r10,280(4)
+	ld	r8,152(4)
+	ldu	r6,24(4)
+	bdnz	1b
+	std	r22,648(3)
+	std	r21,520(3)
+	std	r20,392(3)
+	std	r11,264(3)
+	std	r9,136(3)
+	std	r7,8(3)
+	addi	r4,r4,640
+	addi	r3,r3,648
+	bge	0b
+	mtctr	r5
+	ld	r7,0(4)
+	ld	r8,8(4)
+	ldu	r9,16(4)
+3:	ld	r10,8(4)
+	std	r7,8(3)
+	ld	r7,16(4)
+	std	r8,16(3)
+	ld	r8,24(4)
+	std	r9,24(3)
+	ldu	r9,32(4)
+	stdu	r10,32(3)
+	bdnz	3b
+4:	ld	r10,8(4)
+	std	r7,8(3)
+	std	r8,16(3)
+	std	r9,24(3)
+	std	r10,32(3)
+9:	ld	r20,-96(1)
+	ld	r21,-88(1)
+	ld	r22,-80(1)
+	ld	r23,-72(1)
+	ld	r24,-64(1)
+	ld	r25,-56(1)
+	ld	r26,-48(1)
+	ld	r27,-40(1)
+	ld	r28,-32(1)
+	ld	r29,-24(1)
+	ld	r30,-16(1)
+	ld	r31,-8(1)
+	blr
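
What copy_page above achieves is simply the following (hedged sketch, name invented); the point of the assembly is that it interleaves six load/store streams spaced 128 bytes apart so several cache-line misses are in flight at once:

static void copy_page_model(unsigned long *to, const unsigned long *from)
{
	unsigned long i;

	for (i = 0; i < 4096 / sizeof(unsigned long); i++)
		to[i] = from[i];
}
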
diff --git a/arch/powerpc/lib/copyuser.S b/arch/powerpc/lib/copyuser.S
new file mode 100644
index 0000000..a0b3fbb
--- /dev/null
+++ b/arch/powerpc/lib/copyuser.S
@@ -0,0 +1,576 @@
+/*
+ * arch/ppc64/lib/copyuser.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+	.align	7
+_GLOBAL(__copy_tofrom_user)
+	/* first check for a whole page copy on a page boundary */
+	cmpldi	cr1,r5,16
+	cmpdi	cr6,r5,4096
+	or	r0,r3,r4
+	neg	r6,r3		/* LS 3 bits = # bytes to 8-byte dest bdry */
+	andi.	r0,r0,4095
+	std	r3,-24(r1)
+	crand	cr0*4+2,cr0*4+2,cr6*4+2
+	std	r4,-16(r1)
+	std	r5,-8(r1)
+	dcbt	0,r4
+	beq	.Lcopy_page
+	andi.	r6,r6,7
+	mtcrf	0x01,r5
+	blt	cr1,.Lshort_copy
+	bne	.Ldst_unaligned
+.Ldst_aligned:
+	andi.	r0,r4,7
+	addi	r3,r3,-16
+	bne	.Lsrc_unaligned
+	srdi	r7,r5,4
+20:	ld	r9,0(r4)
+	addi	r4,r4,-8
+	mtctr	r7
+	andi.	r5,r5,7
+	bf	cr7*4+0,22f
+	addi	r3,r3,8
+	addi	r4,r4,8
+	mr	r8,r9
+	blt	cr1,72f
+21:	ld	r9,8(r4)
+70:	std	r8,8(r3)
+22:	ldu	r8,16(r4)
+71:	stdu	r9,16(r3)
+	bdnz	21b
+72:	std	r8,8(r3)
+	beq+	3f
+	addi	r3,r3,16
+23:	ld	r9,8(r4)
+.Ldo_tail:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+73:	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+74:	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+75:	stb	r9,0(r3)
+3:	li	r3,0
+	blr
+
+.Lsrc_unaligned:
+	srdi	r6,r5,3
+	addi	r5,r5,-16
+	subf	r4,r0,r4
+	srdi	r7,r5,4
+	sldi	r10,r0,3
+	cmpldi	cr6,r6,3
+	andi.	r5,r5,7
+	mtctr	r7
+	subfic	r11,r10,64
+	add	r5,r5,r0
+	bt	cr7*4+0,28f
+
+24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
+25:	ld	r0,8(r4)
+	sld	r6,r9,r10
+26:	ldu	r9,16(r4)
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	or	r7,r7,r6
+	blt	cr6,79f
+27:	ld	r0,8(r4)
+	b	2f
+
+28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
+29:	ldu	r9,8(r4)
+	sld	r8,r0,r10
+	addi	r3,r3,-8
+	blt	cr6,5f
+30:	ld	r0,8(r4)
+	srd	r12,r9,r11
+	sld	r6,r9,r10
+31:	ldu	r9,16(r4)
+	or	r12,r8,r12
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	addi	r3,r3,16
+	beq	cr6,78f
+
+1:	or	r7,r7,r6
+32:	ld	r0,8(r4)
+76:	std	r12,8(r3)
+2:	srd	r12,r9,r11
+	sld	r6,r9,r10
+33:	ldu	r9,16(r4)
+	or	r12,r8,r12
+77:	stdu	r7,16(r3)
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	bdnz	1b
+
+78:	std	r12,8(r3)
+	or	r7,r7,r6
+79:	std	r7,16(r3)
+5:	srd	r12,r9,r11
+	or	r12,r8,r12
+80:	std	r12,24(r3)
+	bne	6f
+	li	r3,0
+	blr
+6:	cmpwi	cr1,r5,8
+	addi	r3,r3,32
+	sld	r9,r9,r10
+	ble	cr1,.Ldo_tail
+34:	ld	r0,8(r4)
+	srd	r7,r0,r11
+	or	r9,r7,r9
+	b	.Ldo_tail
+
+.Ldst_unaligned:
+	mtcrf	0x01,r6		/* put #bytes to 8B bdry into cr7 */
+	subf	r5,r6,r5
+	li	r7,0
+	cmpldi	r1,r5,16
+	bf	cr7*4+3,1f
+35:	lbz	r0,0(r4)
+81:	stb	r0,0(r3)
+	addi	r7,r7,1
+1:	bf	cr7*4+2,2f
+36:	lhzx	r0,r7,r4
+82:	sthx	r0,r7,r3
+	addi	r7,r7,2
+2:	bf	cr7*4+1,3f
+37:	lwzx	r0,r7,r4
+83:	stwx	r0,r7,r3
+3:	mtcrf	0x01,r5
+	add	r4,r6,r4
+	add	r3,r6,r3
+	b	.Ldst_aligned
+
+.Lshort_copy:
+	bf	cr7*4+0,1f
+38:	lwz	r0,0(r4)
+39:	lwz	r9,4(r4)
+	addi	r4,r4,8
+84:	stw	r0,0(r3)
+85:	stw	r9,4(r3)
+	addi	r3,r3,8
+1:	bf	cr7*4+1,2f
+40:	lwz	r0,0(r4)
+	addi	r4,r4,4
+86:	stw	r0,0(r3)
+	addi	r3,r3,4
+2:	bf	cr7*4+2,3f
+41:	lhz	r0,0(r4)
+	addi	r4,r4,2
+87:	sth	r0,0(r3)
+	addi	r3,r3,2
+3:	bf	cr7*4+3,4f
+42:	lbz	r0,0(r4)
+88:	stb	r0,0(r3)
+4:	li	r3,0
+	blr
+
+/*
+ * exception handlers follow
+ * we have to return the number of bytes not copied
+ * for an exception on a load, we set the rest of the destination to 0
+ */
+
+136:
+137:
+	add	r3,r3,r7
+	b	1f
+130:
+131:
+	addi	r3,r3,8
+120:
+122:
+124:
+125:
+126:
+127:
+128:
+129:
+133:
+	addi	r3,r3,8
+121:
+132:
+	addi	r3,r3,8
+123:
+134:
+135:
+138:
+139:
+140:
+141:
+142:
+
+/*
+ * here we have had a fault on a load and r3 points to the first
+ * unmodified byte of the destination
+ */
+1:	ld	r6,-24(r1)
+	ld	r4,-16(r1)
+	ld	r5,-8(r1)
+	subf	r6,r6,r3
+	add	r4,r4,r6
+	subf	r5,r6,r5	/* #bytes left to go */
+
+/*
+ * first see if we can copy any more bytes before hitting another exception
+ */
+	mtctr	r5
+43:	lbz	r0,0(r4)
+	addi	r4,r4,1
+89:	stb	r0,0(r3)
+	addi	r3,r3,1
+	bdnz	43b
+	li	r3,0		/* huh? all copied successfully this time? */
+	blr
+
+/*
+ * here we have trapped again, need to clear ctr bytes starting at r3
+ */
+143:	mfctr	r5
+	li	r0,0
+	mr	r4,r3
+	mr	r3,r5		/* return the number of bytes not copied */
+1:	andi.	r9,r4,7
+	beq	3f
+90:	stb	r0,0(r4)
+	addic.	r5,r5,-1
+	addi	r4,r4,1
+	bne	1b
+	blr
+3:	cmpldi	cr1,r5,8
+	srdi	r9,r5,3
+	andi.	r5,r5,7
+	blt	cr1,93f
+	mtctr	r9
+91:	std	r0,0(r4)
+	addi	r4,r4,8
+	bdnz	91b
+93:	beqlr
+	mtctr	r5	
+92:	stb	r0,0(r4)
+	addi	r4,r4,1
+	bdnz	92b
+	blr
+
+/*
+ * exception handlers for stores: we just need to work
+ * out how many bytes weren't copied
+ */
+182:
+183:
+	add	r3,r3,r7
+	b	1f
+180:
+	addi	r3,r3,8
+171:
+177:
+	addi	r3,r3,8
+170:
+172:
+176:
+178:
+	addi	r3,r3,4
+185:
+	addi	r3,r3,4
+173:
+174:
+175:
+179:
+181:
+184:
+186:
+187:
+188:
+189:	
+1:
+	ld	r6,-24(r1)
+	ld	r5,-8(r1)
+	add	r6,r6,r5
+	subf	r3,r3,r6	/* #bytes not copied */
+190:
+191:
+192:
+	blr			/* #bytes not copied in r3 */
+
+	.section __ex_table,"a"
+	.align	3
+	.llong	20b,120b
+	.llong	21b,121b
+	.llong	70b,170b
+	.llong	22b,122b
+	.llong	71b,171b
+	.llong	72b,172b
+	.llong	23b,123b
+	.llong	73b,173b
+	.llong	74b,174b
+	.llong	75b,175b
+	.llong	24b,124b
+	.llong	25b,125b
+	.llong	26b,126b
+	.llong	27b,127b
+	.llong	28b,128b
+	.llong	29b,129b
+	.llong	30b,130b
+	.llong	31b,131b
+	.llong	32b,132b
+	.llong	76b,176b
+	.llong	33b,133b
+	.llong	77b,177b
+	.llong	78b,178b
+	.llong	79b,179b
+	.llong	80b,180b
+	.llong	34b,134b
+	.llong	35b,135b
+	.llong	81b,181b
+	.llong	36b,136b
+	.llong	82b,182b
+	.llong	37b,137b
+	.llong	83b,183b
+	.llong	38b,138b
+	.llong	39b,139b
+	.llong	84b,184b
+	.llong	85b,185b
+	.llong	40b,140b
+	.llong	86b,186b
+	.llong	41b,141b
+	.llong	87b,187b
+	.llong	42b,142b
+	.llong	88b,188b
+	.llong	43b,143b
+	.llong	89b,189b
+	.llong	90b,190b
+	.llong	91b,191b
+	.llong	92b,192b
+	
+	.text
+
+/*
+ * Routine to copy a whole page of data, optimized for POWER4.
+ * On POWER4 it is more than 50% faster than the simple loop
+ * above (following the .Ldst_aligned label) but it runs slightly
+ * slower on POWER3.
+ */
+.Lcopy_page:
+	std	r31,-32(1)
+	std	r30,-40(1)
+	std	r29,-48(1)
+	std	r28,-56(1)
+	std	r27,-64(1)
+	std	r26,-72(1)
+	std	r25,-80(1)
+	std	r24,-88(1)
+	std	r23,-96(1)
+	std	r22,-104(1)
+	std	r21,-112(1)
+	std	r20,-120(1)
+	li	r5,4096/32 - 1
+	addi	r3,r3,-8
+	li	r0,5
+0:	addi	r5,r5,-24
+	mtctr	r0
+20:	ld	r22,640(4)
+21:	ld	r21,512(4)
+22:	ld	r20,384(4)
+23:	ld	r11,256(4)
+24:	ld	r9,128(4)
+25:	ld	r7,0(4)
+26:	ld	r25,648(4)
+27:	ld	r24,520(4)
+28:	ld	r23,392(4)
+29:	ld	r10,264(4)
+30:	ld	r8,136(4)
+31:	ldu	r6,8(4)
+	cmpwi	r5,24
+1:
+32:	std	r22,648(3)
+33:	std	r21,520(3)
+34:	std	r20,392(3)
+35:	std	r11,264(3)
+36:	std	r9,136(3)
+37:	std	r7,8(3)
+38:	ld	r28,648(4)
+39:	ld	r27,520(4)
+40:	ld	r26,392(4)
+41:	ld	r31,264(4)
+42:	ld	r30,136(4)
+43:	ld	r29,8(4)
+44:	std	r25,656(3)
+45:	std	r24,528(3)
+46:	std	r23,400(3)
+47:	std	r10,272(3)
+48:	std	r8,144(3)
+49:	std	r6,16(3)
+50:	ld	r22,656(4)
+51:	ld	r21,528(4)
+52:	ld	r20,400(4)
+53:	ld	r11,272(4)
+54:	ld	r9,144(4)
+55:	ld	r7,16(4)
+56:	std	r28,664(3)
+57:	std	r27,536(3)
+58:	std	r26,408(3)
+59:	std	r31,280(3)
+60:	std	r30,152(3)
+61:	stdu	r29,24(3)
+62:	ld	r25,664(4)
+63:	ld	r24,536(4)
+64:	ld	r23,408(4)
+65:	ld	r10,280(4)
+66:	ld	r8,152(4)
+67:	ldu	r6,24(4)
+	bdnz	1b
+68:	std	r22,648(3)
+69:	std	r21,520(3)
+70:	std	r20,392(3)
+71:	std	r11,264(3)
+72:	std	r9,136(3)
+73:	std	r7,8(3)
+74:	addi	r4,r4,640
+75:	addi	r3,r3,648
+	bge	0b
+	mtctr	r5
+76:	ld	r7,0(4)
+77:	ld	r8,8(4)
+78:	ldu	r9,16(4)
+3:
+79:	ld	r10,8(4)
+80:	std	r7,8(3)
+81:	ld	r7,16(4)
+82:	std	r8,16(3)
+83:	ld	r8,24(4)
+84:	std	r9,24(3)
+85:	ldu	r9,32(4)
+86:	stdu	r10,32(3)
+	bdnz	3b
+4:
+87:	ld	r10,8(4)
+88:	std	r7,8(3)
+89:	std	r8,16(3)
+90:	std	r9,24(3)
+91:	std	r10,32(3)
+9:	ld	r20,-120(1)
+	ld	r21,-112(1)
+	ld	r22,-104(1)
+	ld	r23,-96(1)
+	ld	r24,-88(1)
+	ld	r25,-80(1)
+	ld	r26,-72(1)
+	ld	r27,-64(1)
+	ld	r28,-56(1)
+	ld	r29,-48(1)
+	ld	r30,-40(1)
+	ld	r31,-32(1)
+	li	r3,0
+	blr
+
+/*
+ * on an exception, reset to the beginning and jump back into the
+ * standard __copy_tofrom_user
+ */
+100:	ld	r20,-120(1)
+	ld	r21,-112(1)
+	ld	r22,-104(1)
+	ld	r23,-96(1)
+	ld	r24,-88(1)
+	ld	r25,-80(1)
+	ld	r26,-72(1)
+	ld	r27,-64(1)
+	ld	r28,-56(1)
+	ld	r29,-48(1)
+	ld	r30,-40(1)
+	ld	r31,-32(1)
+	ld	r3,-24(r1)
+	ld	r4,-16(r1)
+	li	r5,4096
+	b	.Ldst_aligned
+
+	.section __ex_table,"a"
+	.align	3
+	.llong	20b,100b
+	.llong	21b,100b
+	.llong	22b,100b
+	.llong	23b,100b
+	.llong	24b,100b
+	.llong	25b,100b
+	.llong	26b,100b
+	.llong	27b,100b
+	.llong	28b,100b
+	.llong	29b,100b
+	.llong	30b,100b
+	.llong	31b,100b
+	.llong	32b,100b
+	.llong	33b,100b
+	.llong	34b,100b
+	.llong	35b,100b
+	.llong	36b,100b
+	.llong	37b,100b
+	.llong	38b,100b
+	.llong	39b,100b
+	.llong	40b,100b
+	.llong	41b,100b
+	.llong	42b,100b
+	.llong	43b,100b
+	.llong	44b,100b
+	.llong	45b,100b
+	.llong	46b,100b
+	.llong	47b,100b
+	.llong	48b,100b
+	.llong	49b,100b
+	.llong	50b,100b
+	.llong	51b,100b
+	.llong	52b,100b
+	.llong	53b,100b
+	.llong	54b,100b
+	.llong	55b,100b
+	.llong	56b,100b
+	.llong	57b,100b
+	.llong	58b,100b
+	.llong	59b,100b
+	.llong	60b,100b
+	.llong	61b,100b
+	.llong	62b,100b
+	.llong	63b,100b
+	.llong	64b,100b
+	.llong	65b,100b
+	.llong	66b,100b
+	.llong	67b,100b
+	.llong	68b,100b
+	.llong	69b,100b
+	.llong	70b,100b
+	.llong	71b,100b
+	.llong	72b,100b
+	.llong	73b,100b
+	.llong	74b,100b
+	.llong	75b,100b
+	.llong	76b,100b
+	.llong	77b,100b
+	.llong	78b,100b
+	.llong	79b,100b
+	.llong	80b,100b
+	.llong	81b,100b
+	.llong	82b,100b
+	.llong	83b,100b
+	.llong	84b,100b
+	.llong	85b,100b
+	.llong	86b,100b
+	.llong	87b,100b
+	.llong	88b,100b
+	.llong	89b,100b
+	.llong	90b,100b
+	.llong	91b,100b
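
A hedged usage sketch of the contract implemented above: __copy_tofrom_user returns the number of bytes it could not copy, so zero means complete success, and after a source fault the remainder of the destination has been zeroed. The helper name is hypothetical and the __user annotations are omitted:

static inline int copied_everything(void *to, const void *from,
				    unsigned long n)
{
	return __copy_tofrom_user(to, from, n) == 0;
}
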
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S
new file mode 100644
index 0000000..3527569
--- /dev/null
+++ b/arch/powerpc/lib/div64.S
@@ -0,0 +1,58 @@
+/*
+ * Divide a 64-bit unsigned number by a 32-bit unsigned number.
+ * This routine assumes that the top 32 bits of the dividend are
+ * non-zero to start with.
+ * On entry, r3 points to the dividend, which gets overwritten with
+ * the 64-bit quotient, and r4 contains the divisor.
+ * On exit, r3 contains the remainder.
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+_GLOBAL(__div64_32)
+	lwz	r5,0(r3)	# get the dividend into r5/r6
+	lwz	r6,4(r3)
+	cmplw	r5,r4
+	li	r7,0
+	li	r8,0
+	blt	1f
+	divwu	r7,r5,r4	# if dividend.hi >= divisor,
+	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
+	subf.	r5,r0,r5	# dividend.hi %= divisor
+	beq	3f
+1:	mr	r11,r5		# here dividend.hi != 0
+	andis.	r0,r5,0xc000
+	bne	2f
+	cntlzw	r0,r5		# we are shifting the dividend right
+	li	r10,-1		# to make it < 2^32, and shifting
+	srw	r10,r10,r0	# the divisor right the same amount,
+	add	r9,r4,r10	# rounding up (so the estimate cannot
+	andc	r11,r6,r10	# ever be too large, only too small)
+	andc	r9,r9,r10
+	or	r11,r5,r11
+	rotlw	r9,r9,r0
+	rotlw	r11,r11,r0
+	divwu	r11,r11,r9	# then we divide the shifted quantities
+2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
+	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
+	subfc	r6,r10,r6	# take the product from the divisor,
+	add	r8,r8,r11	# and add the estimate to the accumulated
+	subfe.	r5,r9,r5	# quotient
+	bne	1b
+3:	cmplw	r6,r4
+	blt	4f
+	divwu	r0,r6,r4	# perform the remaining 32-bit division
+	mullw	r10,r0,r4	# and get the remainder
+	add	r8,r8,r0
+	subf	r6,r10,r6
+4:	stw	r7,0(r3)	# return the quotient in *r3
+	stw	r8,4(r3)
+	mr	r3,r6		# return the remainder in r3
+	blr
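
In C, the contract of __div64_32 above is simply this (hedged model, name made up; the assembly computes it using only 32-bit divide instructions):

static unsigned int div64_32_model(unsigned long long *n, unsigned int base)
{
	unsigned int rem = *n % base;

	*n /= base;
	return rem;
}
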
diff --git a/arch/powerpc/lib/e2a.c b/arch/powerpc/lib/e2a.c
new file mode 100644
index 0000000..d2b8348
--- /dev/null
+++ b/arch/powerpc/lib/e2a.c
@@ -0,0 +1,108 @@
+/*
+ *  arch/ppc64/lib/e2a.c
+ *
+ *  EBCDIC to ASCII conversion
+ *
+ * This function moved here from arch/ppc64/kernel/viopath.c
+ *
+ * (C) Copyright 2000-2004 IBM Corporation
+ *
+ * This program is free software;  you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+
+unsigned char e2a(unsigned char x)
+{
+	switch (x) {
+	case 0xF0:
+		return '0';
+	case 0xF1:
+		return '1';
+	case 0xF2:
+		return '2';
+	case 0xF3:
+		return '3';
+	case 0xF4:
+		return '4';
+	case 0xF5:
+		return '5';
+	case 0xF6:
+		return '6';
+	case 0xF7:
+		return '7';
+	case 0xF8:
+		return '8';
+	case 0xF9:
+		return '9';
+	case 0xC1:
+		return 'A';
+	case 0xC2:
+		return 'B';
+	case 0xC3:
+		return 'C';
+	case 0xC4:
+		return 'D';
+	case 0xC5:
+		return 'E';
+	case 0xC6:
+		return 'F';
+	case 0xC7:
+		return 'G';
+	case 0xC8:
+		return 'H';
+	case 0xC9:
+		return 'I';
+	case 0xD1:
+		return 'J';
+	case 0xD2:
+		return 'K';
+	case 0xD3:
+		return 'L';
+	case 0xD4:
+		return 'M';
+	case 0xD5:
+		return 'N';
+	case 0xD6:
+		return 'O';
+	case 0xD7:
+		return 'P';
+	case 0xD8:
+		return 'Q';
+	case 0xD9:
+		return 'R';
+	case 0xE2:
+		return 'S';
+	case 0xE3:
+		return 'T';
+	case 0xE4:
+		return 'U';
+	case 0xE5:
+		return 'V';
+	case 0xE6:
+		return 'W';
+	case 0xE7:
+		return 'X';
+	case 0xE8:
+		return 'Y';
+	case 0xE9:
+		return 'Z';
+	}
+	return ' ';
+}
+EXPORT_SYMBOL(e2a);
+
+
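An illustrative (hypothetical) caller, converting a fixed-length EBCDIC field in place:

static void ebcdic_to_ascii(unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = e2a(buf[i]);
}
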
diff --git a/arch/powerpc/lib/memcpy.S b/arch/powerpc/lib/memcpy.S
new file mode 100644
index 0000000..9ccacdf
--- /dev/null
+++ b/arch/powerpc/lib/memcpy.S
@@ -0,0 +1,172 @@
+/*
+ * arch/ppc64/lib/memcpy.S
+ *
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+	.align	7
+_GLOBAL(memcpy)
+	mtcrf	0x01,r5
+	cmpldi	cr1,r5,16
+	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
+	andi.	r6,r6,7
+	dcbt	0,r4
+	blt	cr1,.Lshort_copy
+	bne	.Ldst_unaligned
+.Ldst_aligned:
+	andi.	r0,r4,7
+	addi	r3,r3,-16
+	bne	.Lsrc_unaligned
+	srdi	r7,r5,4
+	ld	r9,0(r4)
+	addi	r4,r4,-8
+	mtctr	r7
+	andi.	r5,r5,7
+	bf	cr7*4+0,2f
+	addi	r3,r3,8
+	addi	r4,r4,8
+	mr	r8,r9
+	blt	cr1,3f
+1:	ld	r9,8(r4)
+	std	r8,8(r3)
+2:	ldu	r8,16(r4)
+	stdu	r9,16(r3)
+	bdnz	1b
+3:	std	r8,8(r3)
+	beqlr
+	addi	r3,r3,16
+	ld	r9,8(r4)
+.Ldo_tail:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+	stb	r9,0(r3)
+3:	blr
+
+.Lsrc_unaligned:
+	srdi	r6,r5,3
+	addi	r5,r5,-16
+	subf	r4,r0,r4
+	srdi	r7,r5,4
+	sldi	r10,r0,3
+	cmpdi	cr6,r6,3
+	andi.	r5,r5,7
+	mtctr	r7
+	subfic	r11,r10,64
+	add	r5,r5,r0
+
+	bt	cr7*4+0,0f
+
+	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
+	ld	r0,8(r4)
+	sld	r6,r9,r10
+	ldu	r9,16(r4)
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	or	r7,r7,r6
+	blt	cr6,4f
+	ld	r0,8(r4)
+	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
+	b	2f
+
+0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
+	ldu	r9,8(r4)
+	sld	r8,r0,r10
+	addi	r3,r3,-8
+	blt	cr6,5f
+	ld	r0,8(r4)
+	srd	r12,r9,r11
+	sld	r6,r9,r10
+	ldu	r9,16(r4)
+	or	r12,r8,r12
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	addi	r3,r3,16
+	beq	cr6,3f
+
+	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
+1:	or	r7,r7,r6
+	ld	r0,8(r4)
+	std	r12,8(r3)
+2:	srd	r12,r9,r11
+	sld	r6,r9,r10
+	ldu	r9,16(r4)
+	or	r12,r8,r12
+	stdu	r7,16(r3)
+	srd	r7,r0,r11
+	sld	r8,r0,r10
+	bdnz	1b
+
+3:	std	r12,8(r3)
+	or	r7,r7,r6
+4:	std	r7,16(r3)
+5:	srd	r12,r9,r11
+	or	r12,r8,r12
+	std	r12,24(r3)
+	beqlr
+	cmpwi	cr1,r5,8
+	addi	r3,r3,32
+	sld	r9,r9,r10
+	ble	cr1,.Ldo_tail
+	ld	r0,8(r4)
+	srd	r7,r0,r11
+	or	r9,r7,r9
+	b	.Ldo_tail
+
+.Ldst_unaligned:
+	mtcrf	0x01,r6		# put #bytes to 8B bdry into cr7
+	subf	r5,r6,r5
+	li	r7,0
+	cmpldi	r1,r5,16
+	bf	cr7*4+3,1f
+	lbz	r0,0(r4)
+	stb	r0,0(r3)
+	addi	r7,r7,1
+1:	bf	cr7*4+2,2f
+	lhzx	r0,r7,r4
+	sthx	r0,r7,r3
+	addi	r7,r7,2
+2:	bf	cr7*4+1,3f
+	lwzx	r0,r7,r4
+	stwx	r0,r7,r3
+3:	mtcrf	0x01,r5
+	add	r4,r6,r4
+	add	r3,r6,r3
+	b	.Ldst_aligned
+
+.Lshort_copy:
+	bf	cr7*4+0,1f
+	lwz	r0,0(r4)
+	lwz	r9,4(r4)
+	addi	r4,r4,8
+	stw	r0,0(r3)
+	stw	r9,4(r3)
+	addi	r3,r3,8
+1:	bf	cr7*4+1,2f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+2:	bf	cr7*4+2,3f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+3:	bf	cr7*4+3,4f
+	lbz	r0,0(r4)
+	stb	r0,0(r3)
+4:	blr
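
The sld/srd pairs in .Lsrc_unaligned above implement the classic two-doubleword merge for a misaligned source. A hedged C model (name invented; big-endian byte order assumed, and offset is 1..7 on this path, so neither shift count degenerates to 0 or 64):

static unsigned long merge_doublewords(unsigned long first,
				       unsigned long second,
				       unsigned int offset)
{
	unsigned int sh = 8 * offset;	/* r10 = 8*offset, r11 = 64 - 8*offset */

	return (first << sh) | (second >> (64 - sh));
}
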
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644
index 0000000..42c5de2
--- /dev/null
+++ b/arch/powerpc/lib/rheap.c
@@ -0,0 +1,693 @@
+/*
+ * arch/ppc/syslib/rheap.c
+ *
+ * A Remote Heap.  Remote means that we don't touch the memory that the
+ * heap points to. Normal heap implementations use the memory they manage
+ * to place their list. We cannot do that because the memory we manage may
+ * have special properties, for example it is uncacheable or of different
+ * endianness.
+ *
+ * Author: Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/rheap.h>
+
+/*
+ * Fixup a list_head, needed when copying lists.  If the pointers fall
+ * between s and e, apply the delta.  This assumes that
+ * sizeof(struct list_head *) == sizeof(unsigned long *).
+ */
+static inline void fixup(unsigned long s, unsigned long e, int d,
+			 struct list_head *l)
+{
+	unsigned long *pp;
+
+	pp = (unsigned long *)&l->next;
+	if (*pp >= s && *pp < e)
+		*pp += d;
+
+	pp = (unsigned long *)&l->prev;
+	if (*pp >= s && *pp < e)
+		*pp += d;
+}
+
+/* Grow the allocated blocks */
+static int grow(rh_info_t * info, int max_blocks)
+{
+	rh_block_t *block, *blk;
+	int i, new_blocks;
+	int delta;
+	unsigned long blks, blke;
+
+	if (max_blocks <= info->max_blocks)
+		return -EINVAL;
+
+	new_blocks = max_blocks - info->max_blocks;
+
+	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
+	if (block == NULL)
+		return -ENOMEM;
+
+	if (info->max_blocks > 0) {
+
+		/* copy old block area */
+		memcpy(block, info->block,
+		       sizeof(rh_block_t) * info->max_blocks);
+
+		delta = (char *)block - (char *)info->block;
+
+		/* and fixup list pointers */
+		blks = (unsigned long)info->block;
+		blke = (unsigned long)(info->block + info->max_blocks);
+
+		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
+			fixup(blks, blke, delta, &blk->list);
+
+		fixup(blks, blke, delta, &info->empty_list);
+		fixup(blks, blke, delta, &info->free_list);
+		fixup(blks, blke, delta, &info->taken_list);
+
+		/* free the old allocated memory */
+		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
+			kfree(info->block);
+	}
+
+	info->block = block;
+	info->empty_slots += new_blocks;
+	info->max_blocks = max_blocks;
+	info->flags &= ~RHIF_STATIC_BLOCK;
+
+	/* add all new blocks to the empty-slot list; info->max_blocks has
+	 * already been updated, so the first new slot is new_blocks entries
+	 * from the end of the array */
+	for (i = 0, blk = block + info->max_blocks - new_blocks;
+	     i < new_blocks; i++, blk++)
+		list_add(&blk->list, &info->empty_list);
+
+	return 0;
+}
+
+/*
+ * Ensure at least the required number of empty slots.  If this function
+ * causes the block area to grow, then all pointers kept to the block
+ * area become invalid!
+ */
+static int assure_empty(rh_info_t * info, int slots)
+{
+	int max_blocks;
+
+	/* This function is not meant to be used to grow uncontrollably */
+	if (slots >= 4)
+		return -EINVAL;
+
+	/* Enough space */
+	if (info->empty_slots >= slots)
+		return 0;
+
+	/* Next 16 sized block */
+	max_blocks = ((info->max_blocks + slots) + 15) & ~15;
+
+	return grow(info, max_blocks);
+}
+
+static rh_block_t *get_slot(rh_info_t * info)
+{
+	rh_block_t *blk;
+
+	/* No free slots left, and we cannot extend at this point. */
+	/* XXX: You should have called assure_empty before */
+	if (info->empty_slots == 0) {
+		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
+		return NULL;
+	}
+
+	/* Get empty slot to use */
+	blk = list_entry(info->empty_list.next, rh_block_t, list);
+	list_del_init(&blk->list);
+	info->empty_slots--;
+
+	/* Initialize */
+	blk->start = NULL;
+	blk->size = 0;
+	blk->owner = NULL;
+
+	return blk;
+}
+
+static inline void release_slot(rh_info_t * info, rh_block_t * blk)
+{
+	list_add(&blk->list, &info->empty_list);
+	info->empty_slots++;
+}
+
+static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
+{
+	rh_block_t *blk;
+	rh_block_t *before;
+	rh_block_t *after;
+	rh_block_t *next;
+	int size;
+	unsigned long s, e, bs, be;
+	struct list_head *l;
+
+	/* We assume that they are aligned properly */
+	size = blkn->size;
+	s = (unsigned long)blkn->start;
+	e = s + size;
+
+	/* Find the blocks immediately before and after the given one
+	 * (if any) */
+	before = NULL;
+	after = NULL;
+	next = NULL;
+
+	list_for_each(l, &info->free_list) {
+		blk = list_entry(l, rh_block_t, list);
+
+		bs = (unsigned long)blk->start;
+		be = bs + blk->size;
+
+		if (next == NULL && s >= bs)
+			next = blk;
+
+		if (be == s)
+			before = blk;
+
+		if (e == bs)
+			after = blk;
+
+		/* If both are not null, break now */
+		if (before != NULL && after != NULL)
+			break;
+	}
+
+	/* Now check if they are really adjacent */
+	if (before != NULL && s != (unsigned long)before->start + before->size)
+		before = NULL;
+
+	if (after != NULL && e != (unsigned long)after->start)
+		after = NULL;
+
+	/* No coalescing; list insert and return */
+	if (before == NULL && after == NULL) {
+
+		if (next != NULL)
+			list_add(&blkn->list, &next->list);
+		else
+			list_add(&blkn->list, &info->free_list);
+
+		return;
+	}
+
+	/* We don't need it anymore */
+	release_slot(info, blkn);
+
+	/* Grow the before block */
+	if (before != NULL && after == NULL) {
+		before->size += size;
+		return;
+	}
+
+	/* Grow the after block backwards */
+	if (before == NULL && after != NULL) {
+		after->start = (int8_t *)after->start - size;
+		after->size += size;
+		return;
+	}
+
+	/* Grow the before block, and release the after block */
+	before->size += size + after->size;
+	list_del(&after->list);
+	release_slot(info, after);
+}
+
+static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
+{
+	rh_block_t *blk;
+	struct list_head *l;
+
+	/* Find the block immediately before the given one (if any) */
+	list_for_each(l, &info->taken_list) {
+		blk = list_entry(l, rh_block_t, list);
+		if (blk->start > blkn->start) {
+			list_add_tail(&blkn->list, &blk->list);
+			return;
+		}
+	}
+
+	list_add_tail(&blkn->list, &info->taken_list);
+}
+
+/*
+ * Create a remote heap dynamically.  Note that no memory for the blocks
+ * is allocated; it will be allocated upon the first allocation.
+ */
+rh_info_t *rh_create(unsigned int alignment)
+{
+	rh_info_t *info;
+
+	/* Alignment must be a power of two */
+	if ((alignment & (alignment - 1)) != 0)
+		return ERR_PTR(-EINVAL);
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	info->alignment = alignment;
+
+	/* Initially everything is empty */
+	info->block = NULL;
+	info->max_blocks = 0;
+	info->empty_slots = 0;
+	info->flags = 0;
+
+	INIT_LIST_HEAD(&info->empty_list);
+	INIT_LIST_HEAD(&info->free_list);
+	INIT_LIST_HEAD(&info->taken_list);
+
+	return info;
+}
+
+/*
+ * Destroy a dynamically created remote heap.  Deallocate only if the areas
+ * are not static
+ */
+void rh_destroy(rh_info_t * info)
+{
+	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
+		kfree(info->block);
+
+	if ((info->flags & RHIF_STATIC_INFO) == 0)
+		kfree(info);
+}
+
+/*
+ * Initialize in place a remote heap info block.  This is needed to support
+ * operation very early in the startup of the kernel, when it is not yet safe
+ * to call kmalloc.
+ */
+void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
+	     rh_block_t * block)
+{
+	int i;
+	rh_block_t *blk;
+
+	/* Alignment must be a power of two */
+	if ((alignment & (alignment - 1)) != 0)
+		return;
+
+	info->alignment = alignment;
+
+	/* Initially everything is empty */
+	info->block = block;
+	info->max_blocks = max_blocks;
+	info->empty_slots = max_blocks;
+	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
+
+	INIT_LIST_HEAD(&info->empty_list);
+	INIT_LIST_HEAD(&info->free_list);
+	INIT_LIST_HEAD(&info->taken_list);
+
+	/* Add all blocks to the empty-slot list */
+	for (i = 0, blk = block; i < max_blocks; i++, blk++)
+		list_add(&blk->list, &info->empty_list);
+}
+
+/* Attach a free memory region, coalescing regions if adjacent */
+int rh_attach_region(rh_info_t * info, void *start, int size)
+{
+	rh_block_t *blk;
+	unsigned long s, e, m;
+	int r;
+
+	/* The region must be aligned */
+	s = (unsigned long)start;
+	e = s + size;
+	m = info->alignment - 1;
+
+	/* Round start up */
+	s = (s + m) & ~m;
+
+	/* Round end down */
+	e = e & ~m;
+
+	/* Take final values */
+	start = (void *)s;
+	size = (int)(e - s);
+
+	/* Grow the blocks, if needed */
+	r = assure_empty(info, 1);
+	if (r < 0)
+		return r;
+
+	blk = get_slot(info);
+	blk->start = start;
+	blk->size = size;
+	blk->owner = NULL;
+
+	attach_free_block(info, blk);
+
+	return 0;
+}
+
+/* Detach the given address range, splitting the free block if needed. */
+void *rh_detach_region(rh_info_t * info, void *start, int size)
+{
+	struct list_head *l;
+	rh_block_t *blk, *newblk;
+	unsigned long s, e, m, bs, be;
+
+	/* Validate size */
+	if (size <= 0)
+		return ERR_PTR(-EINVAL);
+
+	/* The region must be aligned */
+	s = (unsigned long)start;
+	e = s + size;
+	m = info->alignment - 1;
+
+	/* Round start up */
+	s = (s + m) & ~m;
+
+	/* Round end down */
+	e = e & ~m;
+
+	if (assure_empty(info, 1) < 0)
+		return ERR_PTR(-ENOMEM);
+
+	blk = NULL;
+	list_for_each(l, &info->free_list) {
+		blk = list_entry(l, rh_block_t, list);
+		/* The range must lie entirely inside one free block */
+		bs = (unsigned long)blk->start;
+		be = (unsigned long)blk->start + blk->size;
+		if (s >= bs && e <= be)
+			break;
+		blk = NULL;
+	}
+
+	if (blk == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/* Perfect fit */
+	if (bs == s && be == e) {
+		/* Delete from free list, release slot */
+		list_del(&blk->list);
+		release_slot(info, blk);
+		return (void *)s;
+	}
+
+	/* blk still in free list, with updated start and/or size */
+	if (bs == s || be == e) {
+		if (bs == s)
+			blk->start = (int8_t *)blk->start + size;
+		blk->size -= size;
+
+	} else {
+		/* The front free fragment */
+		blk->size = s - bs;
+
+		/* the back free fragment */
+		newblk = get_slot(info);
+		newblk->start = (void *)e;
+		newblk->size = be - e;
+
+		list_add(&newblk->list, &blk->list);
+	}
+
+	return (void *)s;
+}
+
+void *rh_alloc(rh_info_t * info, int size, const char *owner)
+{
+	struct list_head *l;
+	rh_block_t *blk;
+	rh_block_t *newblk;
+	void *start;
+
+	/* Validate size */
+	if (size <= 0)
+		return ERR_PTR(-EINVAL);
+
+	/* Align to configured alignment */
+	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
+
+	if (assure_empty(info, 1) < 0)
+		return ERR_PTR(-ENOMEM);
+
+	blk = NULL;
+	list_for_each(l, &info->free_list) {
+		blk = list_entry(l, rh_block_t, list);
+		if (size <= blk->size)
+			break;
+		blk = NULL;
+	}
+
+	if (blk == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/* Just fits */
+	if (blk->size == size) {
+		/* Move from free list to taken list */
+		list_del(&blk->list);
+		blk->owner = owner;
+		start = blk->start;
+
+		attach_taken_block(info, blk);
+
+		return start;
+	}
+
+	newblk = get_slot(info);
+	newblk->start = blk->start;
+	newblk->size = size;
+	newblk->owner = owner;
+
+	/* blk still in free list, with updated start, size */
+	blk->start = (int8_t *)blk->start + size;
+	blk->size -= size;
+
+	start = newblk->start;
+
+	attach_taken_block(info, newblk);
+
+	return start;
+}
+
+/* allocate at precisely the given address */
+void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
+{
+	struct list_head *l;
+	rh_block_t *blk, *newblk1, *newblk2;
+	unsigned long s, e, m, bs, be;
+
+	/* Validate size */
+	if (size <= 0)
+		return ERR_PTR(-EINVAL);
+
+	/* The region must be aligned */
+	s = (unsigned long)start;
+	e = s + size;
+	m = info->alignment - 1;
+
+	/* Round start up */
+	s = (s + m) & ~m;
+
+	/* Round end down */
+	e = e & ~m;
+
+	if (assure_empty(info, 2) < 0)
+		return ERR_PTR(-ENOMEM);
+
+	blk = NULL;
+	list_for_each(l, &info->free_list) {
+		blk = list_entry(l, rh_block_t, list);
+		/* The range must lie entirely inside one free block */
+		bs = (unsigned long)blk->start;
+		be = (unsigned long)blk->start + blk->size;
+		if (s >= bs && e <= be)
+			break;
+		blk = NULL;
+	}
+
+	if (blk == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/* Perfect fit */
+	if (bs == s && be == e) {
+		/* Move from free list to taken list */
+		list_del(&blk->list);
+		blk->owner = owner;
+
+		start = blk->start;
+		attach_taken_block(info, blk);
+
+		return start;
+
+	}
+
+	/* blk still in free list, with updated start and/or size */
+	if (bs == s || be == e) {
+		if (bs == s)
+			blk->start = (int8_t *)blk->start + size;
+		blk->size -= size;
+
+	} else {
+		/* The front free fragment */
+		blk->size = s - bs;
+
+		/* The back free fragment */
+		newblk2 = get_slot(info);
+		newblk2->start = (void *)e;
+		newblk2->size = be - e;
+
+		list_add(&newblk2->list, &blk->list);
+	}
+
+	newblk1 = get_slot(info);
+	newblk1->start = (void *)s;
+	newblk1->size = e - s;
+	newblk1->owner = owner;
+
+	start = newblk1->start;
+	attach_taken_block(info, newblk1);
+
+	return start;
+}
+
+int rh_free(rh_info_t * info, void *start)
+{
+	rh_block_t *blk, *blk2;
+	struct list_head *l;
+	int size;
+
+	/* Linear search for block */
+	blk = NULL;
+	list_for_each(l, &info->taken_list) {
+		blk2 = list_entry(l, rh_block_t, list);
+		if (start < blk2->start)
+			break;
+		blk = blk2;
+	}
+
+	if (blk == NULL || start > (blk->start + blk->size))
+		return -EINVAL;
+
+	/* Remove from taken list */
+	list_del(&blk->list);
+
+	/* Get size of freed block */
+	size = blk->size;
+	attach_free_block(info, blk);
+
+	return size;
+}
+
+int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
+{
+	rh_block_t *blk;
+	struct list_head *l;
+	struct list_head *h;
+	int nr;
+
+	switch (what) {
+
+	case RHGS_FREE:
+		h = &info->free_list;
+		break;
+
+	case RHGS_TAKEN:
+		h = &info->taken_list;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Linear search for block */
+	nr = 0;
+	list_for_each(l, h) {
+		blk = list_entry(l, rh_block_t, list);
+		if (stats != NULL && nr < max_stats) {
+			stats->start = blk->start;
+			stats->size = blk->size;
+			stats->owner = blk->owner;
+			stats++;
+		}
+		nr++;
+	}
+
+	return nr;
+}
+
+int rh_set_owner(rh_info_t * info, void *start, const char *owner)
+{
+	rh_block_t *blk, *blk2;
+	struct list_head *l;
+	int size;
+
+	/* Linear search for block */
+	blk = NULL;
+	list_for_each(l, &info->taken_list) {
+		blk2 = list_entry(l, rh_block_t, list);
+		if (start < blk2->start)
+			break;
+		blk = blk2;
+	}
+
+	if (blk == NULL || start > (blk->start + blk->size))
+		return -EINVAL;
+
+	blk->owner = owner;
+	size = blk->size;
+
+	return size;
+}
+
+void rh_dump(rh_info_t * info)
+{
+	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
+	int maxnr;
+	int i, nr;
+
+	maxnr = sizeof(st) / sizeof(st[0]);
+
+	printk(KERN_INFO
+	       "info @0x%p (%d slots empty / %d max)\n",
+	       info, info->empty_slots, info->max_blocks);
+
+	printk(KERN_INFO "  Free:\n");
+	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
+	if (nr > maxnr)
+		nr = maxnr;
+	for (i = 0; i < nr; i++)
+		printk(KERN_INFO
+		       "    0x%p-0x%p (%u)\n",
+		       st[i].start, (int8_t *) st[i].start + st[i].size,
+		       st[i].size);
+	printk(KERN_INFO "\n");
+
+	printk(KERN_INFO "  Taken:\n");
+	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
+	if (nr > maxnr)
+		nr = maxnr;
+	for (i = 0; i < nr; i++)
+		printk(KERN_INFO
+		       "    0x%p-0x%p (%u) %s\n",
+		       st[i].start, (int8_t *) st[i].start + st[i].size,
+		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
+	printk(KERN_INFO "\n");
+}
+
+void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
+{
+	printk(KERN_INFO
+	       "blk @0x%p: 0x%p-0x%p (%u)\n",
+	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
+}
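
A hedged end-to-end usage sketch of the API above (the region handed in and the owner string are made up):

static void *rheap_example(void *base, int size)
{
	rh_info_t *rh = rh_create(16);		/* 16-byte allocation alignment */
	void *buf;

	if (IS_ERR(rh))
		return rh;
	rh_attach_region(rh, base, size);	/* hand the heap a region to manage */
	buf = rh_alloc(rh, 256, "example");	/* ERR_PTR() on failure */
	if (!IS_ERR(buf))
		rh_free(rh, buf);		/* returns the size freed */
	rh_destroy(rh);
	return NULL;
}
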
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
new file mode 100644
index 0000000..e79123d
--- /dev/null
+++ b/arch/powerpc/lib/sstep.c
@@ -0,0 +1,141 @@
+/*
+ * Single-step support.
+ *
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/processor.h>
+
+extern char system_call_common[];
+
+/* Bits in SRR1 that are copied from MSR */
+#define MSR_MASK	0xffffffff87c0ffff
+
+/*
+ * Determine whether a conditional branch instruction would branch.
+ */
+static int branch_taken(unsigned int instr, struct pt_regs *regs)
+{
+	unsigned int bo = (instr >> 21) & 0x1f;
+	unsigned int bi;
+
+	if ((bo & 4) == 0) {
+		/* decrement counter */
+		--regs->ctr;
+		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
+			return 0;
+	}
+	if ((bo & 0x10) == 0) {
+		/* check bit from CR */
+		bi = (instr >> 16) & 0x1f;
+		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * Emulate instructions that cause a transfer of control.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned int opcode, rd;
+	unsigned long int imm;
+
+	opcode = instr >> 26;
+	switch (opcode) {
+	case 16:	/* bc */
+		imm = (signed short)(instr & 0xfffc);
+		if ((instr & 2) == 0)
+			imm += regs->nip;
+		regs->nip += 4;
+		if ((regs->msr & MSR_SF) == 0)
+			regs->nip &= 0xffffffffUL;
+		if (instr & 1)
+			regs->link = regs->nip;
+		if (branch_taken(instr, regs))
+			regs->nip = imm;
+		return 1;
+	case 17:	/* sc */
+		/*
+		 * N.B. this uses knowledge about how the syscall
+		 * entry code works.  If that is changed, this will
+		 * need to be changed also.
+		 */
+		regs->gpr[9] = regs->gpr[13];
+		regs->gpr[11] = regs->nip + 4;
+		regs->gpr[12] = regs->msr & MSR_MASK;
+		regs->gpr[13] = (unsigned long) get_paca();
+		regs->nip = (unsigned long) &system_call_common;
+		regs->msr = MSR_KERNEL;
+		return 1;
+	case 18:	/* b */
+		imm = instr & 0x03fffffc;
+		if (imm & 0x02000000)
+			imm -= 0x04000000;
+		if ((instr & 2) == 0)
+			imm += regs->nip;
+		if (instr & 1) {
+			regs->link = regs->nip + 4;
+			if ((regs->msr & MSR_SF) == 0)
+				regs->link &= 0xffffffffUL;
+		}
+		if ((regs->msr & MSR_SF) == 0)
+			imm &= 0xffffffffUL;
+		regs->nip = imm;
+		return 1;
+	case 19:
+		switch (instr & 0x7fe) {
+		case 0x20:	/* bclr */
+		case 0x420:	/* bcctr */
+			imm = (instr & 0x400)? regs->ctr: regs->link;
+			regs->nip += 4;
+			if ((regs->msr & MSR_SF) == 0) {
+				regs->nip &= 0xffffffffUL;
+				imm &= 0xffffffffUL;
+			}
+			if (instr & 1)
+				regs->link = regs->nip;
+			if (branch_taken(instr, regs))
+				regs->nip = imm;
+			return 1;
+		case 0x24:	/* rfid, scary */
+			return -1;
+		}
+	case 31:
+		rd = (instr >> 21) & 0x1f;
+		switch (instr & 0x7fe) {
+		case 0xa6:	/* mfmsr */
+			regs->gpr[rd] = regs->msr & MSR_MASK;
+			regs->nip += 4;
+			if ((regs->msr & MSR_SF) == 0)
+				regs->nip &= 0xffffffffUL;
+			return 1;
+		case 0x164:	/* mtmsrd */
+			/* only MSR_EE and MSR_RI get changed if bit 15 set */
+			/* mtmsrd doesn't change MSR_HV and MSR_ME */
+			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
+			imm = (regs->msr & MSR_MASK & ~imm)
+				| (regs->gpr[rd] & imm);
+			if ((imm & MSR_RI) == 0)
+				/* can't step mtmsrd that would clear MSR_RI */
+				return -1;
+			regs->msr = imm;
+			regs->nip += 4;
+			if ((imm & MSR_SF) == 0)
+				regs->nip &= 0xffffffffUL;
+			return 1;
+		}
+	}
+	return 0;
+}
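
A hedged caller sketch (the surrounding single-step logic is made up, not part of this patch); the three return values of emulate_step map naturally onto a switch:

static void single_step(struct pt_regs *regs)
{
	unsigned int instr = *(unsigned int *)regs->nip;

	switch (emulate_step(regs, instr)) {
	case 1:			/* emulated: nip (and msr) already advanced */
		break;
	case 0:			/* not emulated: fall back to hardware trace */
		regs->msr |= MSR_SE;
		break;
	default:		/* -1: rfid or MSR_RI-clearing mtmsrd, refuse */
		break;
	}
}
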
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c
new file mode 100644
index 0000000..36b5210
--- /dev/null
+++ b/arch/powerpc/lib/strcase.c
@@ -0,0 +1,23 @@
+#include <linux/ctype.h>
+
+int strcasecmp(const char *s1, const char *s2)
+{
+	int c1, c2;
+
+	do {
+		c1 = tolower(*s1++);
+		c2 = tolower(*s2++);
+	} while (c1 == c2 && c1 != 0);
+	return c1 - c2;
+}
+
+int strncasecmp(const char *s1, const char *s2, int n)
+{
+	int c1, c2;
+
+	do {
+		c1 = tolower(*s1++);
+		c2 = tolower(*s2++);
+	} while ((--n > 0) && c1 == c2 && c1 != 0);
+	return c1 - c2;
+}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
new file mode 100644
index 0000000..15d40e9e
--- /dev/null
+++ b/arch/powerpc/lib/string.S
@@ -0,0 +1,203 @@
+/*
+ * String handling functions for PowerPC.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/errno.h>
+#include <asm/ppc_asm.h>
+
+	.text
+	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
+	.stabs	"string.S",N_SO,0,0,0f
+0:
+
+	.section __ex_table,"a"
+#ifdef CONFIG_PPC64
+	.align	3
+#define EXTBL	.llong
+#else
+	.align	2
+#define EXTBL	.long
+#endif
+	.text
+	
+_GLOBAL(strcpy)
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r5)
+	bne	1b
+	blr
+
+/* This clears out any unused part of the destination buffer,
+   just as the libc version does.  -- paulus */
+_GLOBAL(strncpy)
+	cmpwi	0,r5,0
+	beqlr
+	mtctr	r5
+	addi	r6,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r6)
+	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
+	bnelr			/* if we didn't hit a null char, we're done */
+	mfctr	r5
+	cmpwi	0,r5,0		/* any space left in destination buffer? */
+	beqlr			/* we know r0 == 0 here */
+2:	stbu	r0,1(r6)	/* clear it out if so */
+	bdnz	2b
+	blr
+
+_GLOBAL(strcat)
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r0,1(r5)
+	cmpwi	0,r0,0
+	bne	1b
+	addi	r5,r5,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r5)
+	bne	1b
+	blr
+
+_GLOBAL(strcmp)
+	addi	r5,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r3,1(r5)
+	cmpwi	1,r3,0
+	lbzu	r0,1(r4)
+	subf.	r3,r0,r3
+	beqlr	1
+	beq	1b
+	blr
+
+_GLOBAL(strlen)
+	addi	r4,r3,-1
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	bne	1b
+	subf	r3,r3,r4
+	blr
+
+_GLOBAL(memcmp)
+	cmpwi	0,r5,0
+	ble-	2f
+	mtctr	r5
+	addi	r6,r3,-1
+	addi	r4,r4,-1
+1:	lbzu	r3,1(r6)
+	lbzu	r0,1(r4)
+	subf.	r3,r0,r3
+	bdnzt	2,1b
+	blr
+2:	li	r3,0
+	blr
+
+_GLOBAL(memchr)
+	cmpwi	0,r5,0
+	ble-	2f
+	mtctr	r5
+	addi	r3,r3,-1
+1:	lbzu	r0,1(r3)
+	cmpw	0,r0,r4
+	bdnzf	2,1b
+	beqlr
+2:	li	r3,0
+	blr
+
+_GLOBAL(__clear_user)
+	addi	r6,r3,-4
+	li	r3,0
+	li	r5,0
+	cmplwi	0,r4,4
+	blt	7f
+	/* clear a single word */
+11:	stwu	r5,4(r6)
+	beqlr
+	/* clear word sized chunks */
+	andi.	r0,r6,3
+	add	r4,r0,r4
+	subf	r6,r0,r6
+	srwi	r0,r4,2
+	andi.	r4,r4,3
+	mtctr	r0
+	bdz	7f
+1:	stwu	r5,4(r6)
+	bdnz	1b
+	/* clear byte sized chunks */
+7:	cmpwi	0,r4,0
+	beqlr
+	mtctr	r4
+	addi	r6,r6,3
+8:	stbu	r5,1(r6)
+	bdnz	8b
+	blr
+90:	mr	r3,r4
+	blr
+91:	mfctr	r3
+	slwi	r3,r3,2
+	add	r3,r3,r4
+	blr
+92:	mfctr	r3
+	blr
+
+	.section __ex_table,"a"
+	EXTBL	11b,90b
+	EXTBL	1b,91b
+	EXTBL	8b,92b
+	.text
+
+_GLOBAL(__strncpy_from_user)
+	addi	r6,r3,-1
+	addi	r4,r4,-1
+	cmpwi	0,r5,0
+	beq	2f
+	mtctr	r5
+1:	lbzu	r0,1(r4)
+	cmpwi	0,r0,0
+	stbu	r0,1(r6)
+	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
+	beq	3f
+2:	addi	r6,r6,1
+3:	subf	r3,r3,r6
+	blr
+99:	li	r3,-EFAULT
+	blr
+
+	.section __ex_table,"a"
+	EXTBL	1b,99b
+	.text
+
+/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
+_GLOBAL(__strnlen_user)
+	addi	r7,r3,-1
+	subf	r6,r7,r5	/* top+1 - str */
+	cmplw	0,r4,r6
+	bge	0f
+	mr	r6,r4
+0:	mtctr	r6		/* ctr = min(len, top - str) */
+1:	lbzu	r0,1(r7)	/* get next byte */
+	cmpwi	0,r0,0
+	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
+	addi	r7,r7,1
+	subf	r3,r3,r7	/* number of bytes we have looked at */
+	beqlr			/* return if we found a 0 byte */
+	cmpw	0,r3,r4		/* did we look at all len bytes? */
+	blt	99f		/* if not, must have hit top */
+	addi	r3,r4,1		/* return len + 1 to indicate no null found */
+	blr
+99:	li	r3,0		/* bad address, return 0 */
+	blr
+
+	.section __ex_table,"a"
+	EXTBL	1b,99b
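
A rough C rendering (illustrative only; the real routine must stay in assembler so the __ex_table fixup can attach to the exact load instruction) of the __strnlen_user return convention:

/* Hypothetical C model of the return values; faults aren't modelled. */
static long strnlen_user_sketch(const char *str, long len, const char *top)
{
	long max = (top + 1) - str;	/* bytes up to and including 'top' */
	long i;

	if (len < max)
		max = len;
	for (i = 0; i < max; i++)
		if (str[i] == 0)
			return i + 1;		/* count includes the NUL */
	return (i >= len) ? len + 1 : 0;	/* ran into 'top' first -> 0 */
}

int main(void)
{
	char buf[8] = "abc";

	/* 4: "abc" plus its terminating NUL */
	return strnlen_user_sketch(buf, 8, buf + 7) == 4 ? 0 : 1;
}
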
diff --git a/arch/powerpc/lib/usercopy.c b/arch/powerpc/lib/usercopy.c
new file mode 100644
index 0000000..5eea6f3
--- /dev/null
+++ b/arch/powerpc/lib/usercopy.c
@@ -0,0 +1,41 @@
+/*
+ * Functions which are too large to be inlined.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		n = __copy_from_user(to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
+
+unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_to_user(to, from, n);
+	return n;
+}
+
+unsigned long copy_in_user(void __user *to, const void __user *from,
+			   unsigned long n)
+{
+	might_sleep();
+	if (likely(access_ok(VERIFY_READ, from, n) &&
+	    access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_tofrom_user(to, from, n);
+	return n;
+}
+
+EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(copy_in_user);
+
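
Typical use of these checked copies, sketched as a hypothetical character-device write() method; foo_write() and the 64-byte buffer are invented for illustration:

/* Hypothetical driver method showing the copy_from_user() idiom. */
static ssize_t foo_write(struct file *file, const char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);
	/* copy_from_user() returns the number of bytes left uncopied */
	if (copy_from_user(kbuf, ubuf, count))
		return -EFAULT;
	/* ... act on kbuf ... */
	return count;
}
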
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
new file mode 100644
index 0000000..3d79ce2
--- /dev/null
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -0,0 +1,120 @@
+/*
+ * Modifications by Matt Porter (mporter@mvista.com) to support
+ * PPC44x Book E processors.
+ *
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+
+#include "mmu_decl.h"
+
+extern char etext[], _stext[];
+
+/* Used by the 44x TLB replacement exception handler.
+ * It just needs to be declared somewhere.
+ */
+unsigned int tlb_44x_index = 0;
+unsigned int tlb_44x_hwater = 62;
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ */
+static void __init
+ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+{
+	unsigned long attrib = 0;
+
+	__asm__ __volatile__("\
+	clrrwi	%2,%2,10\n\
+	ori	%2,%2,%4\n\
+	clrrwi	%1,%1,10\n\
+	li	%0,0\n\
+	ori	%0,%0,%5\n\
+	tlbwe	%2,%3,%6\n\
+	tlbwe	%1,%3,%7\n\
+	tlbwe	%0,%3,%8"
+	:
+	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
+	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
+	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	  "i" (PPC44x_TLB_PAGEID),
+	  "i" (PPC44x_TLB_XLAT),
+	  "i" (PPC44x_TLB_ATTRIB));
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+	flush_instruction_cache();
+}
+
+unsigned long __init mmu_mapin_ram(void)
+{
+	unsigned int pinned_tlbs = 1;
+	int i;
+
+	/* Determine number of entries necessary to cover lowmem */
+	pinned_tlbs = (unsigned int)
+		(_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
+
+	/* Write upper watermark to save location */
+	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
+
+	/* If necessary, set additional pinned TLBs */
+	if (pinned_tlbs > 1) {
+		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
+			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
+			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
+		}
+	}
+
+	return total_lowmem;
+}
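
A worked example (illustrative only, assuming PPC44x_PIN_SIZE is the 256MB pinned-entry size described above) of how many pinned entries mmu_mapin_ram() ends up using:

#include <stdio.h>

#define PIN_SHIFT	28			/* stands in for PPC44x_PIN_SHIFT */
#define PIN_SIZE	(1UL << PIN_SHIFT)	/* 256MB */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long lowmem[] = { 256UL << 20, 384UL << 20, 512UL << 20 };
	int i;

	/* 256MB -> 1 entry, 384MB rounds up to 2, 512MB -> 2 */
	for (i = 0; i < 3; i++)
		printf("%4luMB lowmem -> %lu pinned TLB entries\n",
		       lowmem[i] >> 20, ALIGN_UP(lowmem[i], PIN_SIZE) >> PIN_SHIFT);
	return 0;
}
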
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
new file mode 100644
index 0000000..b7bcbc2
--- /dev/null
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -0,0 +1,141 @@
+/*
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include "mmu_decl.h"
+
+extern int __map_without_ltlbs;
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+	/*
+	 * The Zone Protection Register (ZPR) defines how protection will
+	 * be applied to every page which is a member of a given zone. At
+	 * present, we utilize only two of the 4xx's zones.
+	 * The zone index bits (of ZSEL) in the PTE are used for software
+	 * indicators, except the LSB.  For user access, zone 1 is used,
+	 * for kernel access, zone 0 is used.  We set all but zone 1
+	 * to zero, allowing only kernel access as indicated in the PTE.
+	 * For zone 1, we set a 01 binary (a value of 10 will not work)
+	 * to allow user access as indicated in the PTE.  This also allows
+	 * kernel access as indicated in the PTE.
+	 */
+
+	mtspr(SPRN_ZPR, 0x10000000);
+
+	flush_instruction_cache();
+
+	/*
+	 * Set up the real-mode cache parameters for the exception vector
+	 * handlers (which are run in real-mode).
+	 */
+
+	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */
+
+	/*
+	 * Cache instruction and data space where the exception
+	 * vectors and the kernel live in real-mode.
+	 */
+
+	mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
+	mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
+}
+
+#define LARGE_PAGE_SIZE_16M	(1<<24)
+#define LARGE_PAGE_SIZE_4M	(1<<22)
+
+unsigned long __init mmu_mapin_ram(void)
+{
+	unsigned long v, s;
+	phys_addr_t p;
+
+	v = KERNELBASE;
+	p = PPC_MEMSTART;
+	s = 0;
+
+	if (__map_without_ltlbs) {
+		return s;
+	}
+
+	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+		pmd_t *pmdp;
+		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
+
+		spin_lock(&init_mm.page_table_lock);
+		pmdp = pmd_offset(pgd_offset_k(v), v);
+		pmd_val(*pmdp++) = val;
+		pmd_val(*pmdp++) = val;
+		pmd_val(*pmdp++) = val;
+		pmd_val(*pmdp++) = val;
+		spin_unlock(&init_mm.page_table_lock);
+
+		v += LARGE_PAGE_SIZE_16M;
+		p += LARGE_PAGE_SIZE_16M;
+		s += LARGE_PAGE_SIZE_16M;
+	}
+
+	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+		pmd_t *pmdp;
+		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
+
+		spin_lock(&init_mm.page_table_lock);
+		pmdp = pmd_offset(pgd_offset_k(v), v);
+		pmd_val(*pmdp) = val;
+		spin_unlock(&init_mm.page_table_lock);
+
+		v += LARGE_PAGE_SIZE_4M;
+		p += LARGE_PAGE_SIZE_4M;
+		s += LARGE_PAGE_SIZE_4M;
+	}
+
+	return s;
+}
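
A worked example (illustrative only) of how the two loops above carve up lowmem, first in 16MB and then in 4MB large pages; the condition is rewritten in the equivalent overflow-safe form:

#include <stdio.h>

int main(void)
{
	unsigned long total = 100UL << 20, s = 0;	/* 100MB of lowmem */
	int n16 = 0, n4 = 0;

	while (s + (1UL << 24) <= total) { s += 1UL << 24; n16++; }
	while (s + (1UL << 22) <= total) { s += 1UL << 22; n4++; }
	/* prints: 6 x 16MB + 1 x 4MB = 100MB mapped */
	printf("%d x 16MB + %d x 4MB = %luMB mapped\n", n16, n4, s >> 20);
	return 0;
}
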
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
new file mode 100644
index 0000000..9f52c26
--- /dev/null
+++ b/arch/powerpc/mm/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux ppc-specific parts of the memory manager.
+#
+
+obj-y				:= fault.o mem.o
+obj-$(CONFIG_PPC32)		+= init.o pgtable.o mmu_context.o \
+				   mem_pieces.o tlb.o
+obj-$(CONFIG_PPC64)		+= init64.o pgtable64.o mmu_context64.o
+obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu.o hash_32.o
+obj-$(CONFIG_40x)		+= 4xx_mmu.o
+obj-$(CONFIG_44x)		+= 44x_mmu.o
+obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
new file mode 100644
index 0000000..3df641f
--- /dev/null
+++ b/arch/powerpc/mm/fault.c
@@ -0,0 +1,391 @@
+/*
+ *  arch/ppc/mm/fault.c
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Derived from "arch/i386/mm/fault.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Modified by Cort Dougan and Paul Mackerras.
+ *
+ *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/tlbflush.h>
+#include <asm/kdebug.h>
+#include <asm/siginfo.h>
+
+/*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+ */
+static int store_updates_sp(struct pt_regs *regs)
+{
+	unsigned int inst;
+
+	if (get_user(inst, (unsigned int __user *)regs->nip))
+		return 0;
+	/* check for 1 in the rA field */
+	if (((inst >> 16) & 0x1f) != 1)
+		return 0;
+	/* check major opcode */
+	switch (inst >> 26) {
+	case 37:	/* stwu */
+	case 39:	/* stbu */
+	case 45:	/* sthu */
+	case 53:	/* stfsu */
+	case 55:	/* stfdu */
+		return 1;
+	case 62:	/* std or stdu */
+		return (inst & 3) == 1;
+	case 31:
+		/* check minor opcode */
+		switch ((inst >> 1) & 0x3ff) {
+		case 181:	/* stdux */
+		case 183:	/* stwux */
+		case 247:	/* stbux */
+		case 439:	/* sthux */
+		case 695:	/* stfsux */
+		case 759:	/* stfdux */
+			return 1;
+		}
+	}
+	return 0;
+}
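
As a concrete check (illustrative only), the common prologue store "stwu r1,-16(r1)" encodes to 0x9421fff0, which store_updates_sp() accepts: major opcode 37 with rA = 1.

#include <stdio.h>

int main(void)
{
	/* opcode 37 (stwu), rS = 1, rA = 1, d = -16 */
	unsigned int inst = (37u << 26) | (1u << 21) | (1u << 16) | 0xfff0;

	/* prints: inst=0x9421fff0 opcode=37 rA=1 */
	printf("inst=0x%08x opcode=%u rA=%u\n",
	       inst, inst >> 26, (inst >> 16) & 0x1f);
	return 0;
}
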
+
+static void do_dabr(struct pt_regs *regs, unsigned long error_code)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+			11, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (debugger_dabr_match(regs))
+		return;
+
+	/* Clear the DABR */
+	set_dabr(0);
+
+	/* Deliver the signal to userspace */
+	info.si_signo = SIGTRAP;
+	info.si_errno = 0;
+	info.si_code = TRAP_HWBKPT;
+	info.si_addr = (void __user *)regs->nip;
+	force_sig_info(SIGTRAP, &info, current);
+}
+
+/*
+ * For 600- and 800-family processors, the error_code parameter is DSISR
+ * for a data fault, SRR1 for an instruction fault. For 400-family processors
+ * the error_code parameter is ESR for a data fault, 0 for an instruction
+ * fault.
+ * For 64-bit processors, the error_code parameter is
+ *  - DSISR for a non-SLB data access fault,
+ *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
+ *  - 0 for any SLB fault.
+ *
+ * The return value is 0 if the fault was handled, or the signal
+ * number if this is a kernel fault that can't be handled here.
+ */
+int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+			    unsigned long error_code)
+{
+	struct vm_area_struct * vma;
+	struct mm_struct *mm = current->mm;
+	siginfo_t info;
+	int code = SEGV_MAPERR;
+	int is_write = 0;
+	int trap = TRAP(regs);
+	int is_exec = (trap == 0x400);
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+	/*
+	 * Fortunately the bit assignments in SRR1 for an instruction
+	 * fault and DSISR for a data fault are mostly the same for the
+	 * bits we are interested in.  But there are some bits which
+	 * indicate errors in DSISR but can validly be set in SRR1.
+	 */
+	if (trap == 0x400)
+		error_code &= 0x48200000;
+	else
+		is_write = error_code & DSISR_ISSTORE;
+#else
+	is_write = error_code & ESR_DST;
+#endif /* CONFIG_4xx || CONFIG_BOOKE */
+
+	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
+				11, SIGSEGV) == NOTIFY_STOP)
+		return 0;
+
+	if (trap == 0x300) {
+		if (debugger_fault_handler(regs))
+			return 0;
+	}
+
+	/* On a kernel SLB miss we can only check for a valid exception entry */
+	if (!user_mode(regs) && (address >= TASK_SIZE))
+		return SIGSEGV;
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+	if (error_code & DSISR_DABRMATCH) {
+		/* DABR match */
+		do_dabr(regs, error_code);
+		return 0;
+	}
+#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
+
+	if (in_atomic() || mm == NULL) {
+		if (!user_mode(regs))
+			return SIGSEGV;
+		/* in_atomic() in user mode is really bad,
+		   as is current->mm == NULL. */
+		printk(KERN_EMERG "Page fault in user mode with "
+		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
+		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
+		       regs->nip, regs->msr);
+		die("Weird page fault", regs, SIGSEGV);
+	}
+
+	/* When running in the kernel we expect faults to occur only to
+	 * addresses in user space.  All other faults represent errors in the
+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * we will deadlock attempting to validate the fault against the
+	 * address space.  Luckily the kernel only validly references user
+	 * space from well defined areas of code, which are listed in the
+	 * exceptions table.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a deadlock.
+	 * Attempt to lock the address space, if we cannot we then validate the
+	 * source.  If this is invalid we can skip the address space check,
+	 * thus avoiding the deadlock.
+	 */
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if (!user_mode(regs) && !search_exception_tables(regs->nip))
+			goto bad_area_nosemaphore;
+
+		down_read(&mm->mmap_sem);
+	}
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+
+	/*
+	 * N.B. The POWER/Open ABI allows programs to access up to
+	 * 288 bytes below the stack pointer.
+	 * The kernel signal delivery code writes up to about 1.5kB
+	 * below the stack pointer (r1) before decrementing it.
+	 * The exec code can write slightly over 640kB to the stack
+	 * before setting the user r1.  Thus we allow the stack to
+	 * expand to 1MB without further checks.
+	 */
+	if (address + 0x100000 < vma->vm_end) {
+		/* get user regs even if this fault is in kernel mode */
+		struct pt_regs *uregs = current->thread.regs;
+		if (uregs == NULL)
+			goto bad_area;
+
+		/*
+		 * A user-mode access to an address a long way below
+		 * the stack pointer is only valid if the instruction
+		 * is one which would update the stack pointer to the
+		 * address accessed if the instruction completed,
+		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
+		 * (or the byte, halfword, float or double forms).
+		 *
+		 * If we don't check this then any write to the area
+		 * between the last mapped region and the stack will
+		 * expand the stack rather than segfaulting.
+		 */
+		if (address + 2048 < uregs->gpr[1]
+		    && (!user_mode(regs) || !store_updates_sp(regs)))
+			goto bad_area;
+	}
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+good_area:
+	code = SEGV_ACCERR;
+#if defined(CONFIG_6xx)
+	if (error_code & 0x95700000)
+		/* an error such as lwarx to I/O controller space,
+		   address matching DABR, eciwx, etc. */
+		goto bad_area;
+#endif /* CONFIG_6xx */
+#if defined(CONFIG_8xx)
+	/* The MPC8xx seems to always set 0x80000000, which is
+	 * "undefined".  Of those that can be set, this is the only
+	 * one which seems bad.
+	 */
+	if (error_code & 0x10000000)
+		/* Guarded storage error. */
+		goto bad_area;
+#endif /* CONFIG_8xx */
+
+	if (is_exec) {
+#ifdef CONFIG_PPC64
+		/* protection fault */
+		if (error_code & DSISR_PROTFAULT)
+			goto bad_area;
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+#endif
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+		pte_t *ptep;
+
+		/* Since 4xx/Book-E supports per-page execute permission,
+		 * we lazily flush dcache to icache. */
+		ptep = NULL;
+		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
+			struct page *page = pte_page(*ptep);
+
+			if (! test_bit(PG_arch_1, &page->flags)) {
+				flush_dcache_icache_page(page);
+				set_bit(PG_arch_1, &page->flags);
+			}
+			pte_update(ptep, 0, _PAGE_HWEXEC);
+			_tlbie(address);
+			pte_unmap(ptep);
+			up_read(&mm->mmap_sem);
+			return 0;
+		}
+		if (ptep != NULL)
+			pte_unmap(ptep);
+#endif
+	/* a write */
+	} else if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	/* a read */
+	} else {
+		/* protection fault */
+		if (error_code & 0x08000000)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+ survive:
+	switch (handle_mm_fault(mm, vma, address, is_write)) {
+
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		goto do_sigbus;
+	case VM_FAULT_OOM:
+		goto out_of_memory;
+	default:
+		BUG();
+	}
+
+	up_read(&mm->mmap_sem);
+	return 0;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	/* User mode accesses cause a SIGSEGV */
+	if (user_mode(regs)) {
+		_exception(SIGSEGV, regs, code, address);
+		return 0;
+	}
+
+	if (is_exec && (error_code & DSISR_PROTFAULT)
+	    && printk_ratelimit())
+		printk(KERN_CRIT "kernel tried to execute NX-protected"
+		       " page (%lx) - exploit attempt? (uid: %d)\n",
+		       address, current->uid);
+
+	return SIGSEGV;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (current->pid == 1) {
+		yield();
+		down_read(&mm->mmap_sem);
+		goto survive;
+	}
+	printk("VM: killing process %s\n", current->comm);
+	if (user_mode(regs))
+		do_exit(SIGKILL);
+	return SIGKILL;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+	if (user_mode(regs)) {
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)address;
+		force_sig_info(SIGBUS, &info, current);
+		return 0;
+	}
+	return SIGBUS;
+}
+
+/*
+ * bad_page_fault is called when we have a bad access from the kernel.
+ * It is called from the DSI and ISI handlers in head.S and from some
+ * of the procedures in traps.c.
+ */
+void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+	const struct exception_table_entry *entry;
+
+	/* Are we prepared to handle this fault?  */
+	if ((entry = search_exception_tables(regs->nip)) != NULL) {
+		regs->nip = entry->fixup;
+		return;
+	}
+
+	/* kernel has accessed a bad area */
+	die("Kernel access of bad area", regs, sig);
+}
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
new file mode 100644
index 0000000..af9ca0e
--- /dev/null
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -0,0 +1,237 @@
+/*
+ * Modifications by Kumar Gala (kumar.gala@freescale.com) to support
+ * E500 Book E processors.
+ *
+ * Copyright 2004 Freescale Semiconductor, Inc
+ *
+ * This file contains the routines for initializing the MMU
+ * on e500 (Freescale Book-E) processors.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/bootx.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+
+extern void loadcam_entry(unsigned int index);
+unsigned int tlbcam_index;
+unsigned int num_tlbcam_entries;
+static unsigned long __cam0, __cam1, __cam2;
+extern unsigned long total_lowmem;
+extern unsigned long __max_low_memory;
+#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
+
+#define NUM_TLBCAMS	(16)
+
+struct tlbcam {
+	u32	MAS0;
+	u32	MAS1;
+	u32	MAS2;
+	u32	MAS3;
+	u32	MAS7;
+} TLBCAM[NUM_TLBCAMS];
+
+struct tlbcamrange {
+	unsigned long start;
+	unsigned long limit;
+	phys_addr_t phys;
+} tlbcam_addrs[NUM_TLBCAMS];
+
+extern unsigned int tlbcam_index;
+
+/*
+ * Return PA for this VA if it is mapped by a CAM, or 0
+ */
+unsigned long v_mapped_by_tlbcam(unsigned long va)
+{
+	int b;
+	for (b = 0; b < tlbcam_index; ++b)
+		if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
+			return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
+	return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_mapped_by_tlbcam(unsigned long pa)
+{
+	int b;
+	for (b = 0; b < tlbcam_index; ++b)
+		if (pa >= tlbcam_addrs[b].phys
+	    	    && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+		              +tlbcam_addrs[b].phys)
+			return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
+	return 0;
+}
+
+/*
+ * Set up one of the TLB CAM (translation array) entries.
+ * The parameters are not checked; in particular size must be a power
+ * of 4 between 4k and 256M.
+ */
+void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+		unsigned int size, int flags, unsigned int pid)
+{
+	unsigned int tsize, lz;
+
+	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+	tsize = (21 - lz) / 2;
+
+#ifdef CONFIG_SMP
+	if ((flags & _PAGE_NO_CACHE) == 0)
+		flags |= _PAGE_COHERENT;
+#endif
+
+	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
+	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
+	TLBCAM[index].MAS2 = virt & PAGE_MASK;
+
+	TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
+	TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
+	TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
+	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
+	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
+
+	TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
+	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
+	if (flags & _PAGE_USER) {
+		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+	}
+#else
+	TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+#endif
+
+	tlbcam_addrs[index].start = virt;
+	tlbcam_addrs[index].limit = virt + size - 1;
+	tlbcam_addrs[index].phys = phys;
+
+	loadcam_entry(index);
+}
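
A worked example (illustrative only) of the tsize computation above: on these e500 parts, MAS1 TSIZE encodes a page size of 4^TSIZE KB, and __builtin_clz stands in for cntlzw here:

#include <stdio.h>

int main(void)
{
	unsigned int size = 1u << 24;			/* a 16MB CAM */
	unsigned int lz = __builtin_clz(size);		/* cntlzw: 7 */
	unsigned int tsize = (21 - lz) / 2;		/* (21 - 7) / 2 = 7 */

	/* prints: lz=7 tsize=7 -> 16384KB, i.e. 4^7 KB = 16MB */
	printf("lz=%u tsize=%u -> %luKB\n", lz, tsize, 1UL << (2 * tsize));
	return 0;
}
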
+
+void invalidate_tlbcam_entry(int index)
+{
+	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
+	TLBCAM[index].MAS1 = ~MAS1_VALID;
+
+	loadcam_entry(index);
+}
+
+void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
+		unsigned long cam2)
+{
+	settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+	tlbcam_index++;
+	if (cam1) {
+		tlbcam_index++;
+		settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+	}
+	if (cam2) {
+		tlbcam_index++;
+		settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+	}
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+	flush_instruction_cache();
+}
+
+unsigned long __init mmu_mapin_ram(void)
+{
+	cam_mapin_ram(__cam0, __cam1, __cam2);
+
+	return __cam0 + __cam1 + __cam2;
+}
+
+
+void __init
+adjust_total_lowmem(void)
+{
+	unsigned long max_low_mem = MAX_LOW_MEM;
+	unsigned long cam_max = 0x10000000;
+	unsigned long ram;
+
+	/* adjust CAM size to max_low_mem */
+	if (max_low_mem < cam_max)
+		cam_max = max_low_mem;
+
+	/* adjust lowmem size to max_low_mem */
+	if (max_low_mem < total_lowmem)
+		ram = max_low_mem;
+	else
+		ram = total_lowmem;
+
+	/* Calculate CAM values */
+	__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
+	if (__cam0 > cam_max)
+		__cam0 = cam_max;
+	ram -= __cam0;
+	if (ram) {
+		__cam1 = 1UL << 2 * (__ilog2(ram) / 2);
+		if (__cam1 > cam_max)
+			__cam1 = cam_max;
+		ram -= __cam1;
+	}
+	if (ram) {
+		__cam2 = 1UL << 2 * (__ilog2(ram) / 2);
+		if (__cam2 > cam_max)
+			__cam2 = cam_max;
+		ram -= __cam2;
+	}
+
+	printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
+			" CAM2=%ldMb residual: %ldMb\n",
+			__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
+			(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
+	__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
+}
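
A userspace rerun (illustrative only) of the CAM sizing logic above; each CAM is rounded down to an even power of two and clamped to 256MB, so 192MB of lowmem splits cleanly into three 64MB CAMs:

#include <stdio.h>

/* stands in for the kernel's __ilog2() */
static unsigned long ilog2_ul(unsigned long x)
{
	return (8 * sizeof(x) - 1) - __builtin_clzl(x);
}

int main(void)
{
	unsigned long ram = 192UL << 20, cam_max = 0x10000000, cam[3] = {0, 0, 0};
	int i;

	for (i = 0; i < 3 && ram; i++) {
		cam[i] = 1UL << (2 * (ilog2_ul(ram) / 2));
		if (cam[i] > cam_max)
			cam[i] = cam_max;
		ram -= cam[i];
	}
	/* prints: CAM0=64MB CAM1=64MB CAM2=64MB residual=0MB */
	printf("CAM0=%luMB CAM1=%luMB CAM2=%luMB residual=%luMB\n",
	       cam[0] >> 20, cam[1] >> 20, cam[2] >> 20, ram >> 20);
	return 0;
}
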
diff --git a/arch/powerpc/mm/hash_32.S b/arch/powerpc/mm/hash_32.S
new file mode 100644
index 0000000..57278a8
--- /dev/null
+++ b/arch/powerpc/mm/hash_32.S
@@ -0,0 +1,618 @@
+/*
+ *  arch/ppc/kernel/hashtable.S
+ *
+ *  $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *  Adapted for Power Macintosh by Paul Mackerras.
+ *  Low-level exception handlers and MMU support
+ *  rewritten by Paul Mackerras.
+ *    Copyright (C) 1996 Paul Mackerras.
+ *
+ *  This file contains low-level assembler routines for managing
+ *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
+ *  hash table, so this file is not used on them.)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SMP
+	.comm	mmu_hash_lock,4
+#endif /* CONFIG_SMP */
+
+/*
+ * Sync CPUs with hash_page taking & releasing the hash
+ * table lock
+ */
+#ifdef CONFIG_SMP
+	.text
+_GLOBAL(hash_page_sync)
+	lis	r8,mmu_hash_lock@h
+	ori	r8,r8,mmu_hash_lock@l
+	lis	r0,0x0fff
+	b	10f
+11:	lwz	r6,0(r8)
+	cmpwi	0,r6,0
+	bne	11b
+10:	lwarx	r6,0,r8
+	cmpwi	0,r6,0
+	bne-	11b
+	stwcx.	r0,0,r8
+	bne-	10b
+	isync
+	eieio
+	li	r0,0
+	stw	r0,0(r8)
+	blr
+#endif
+
+/*
+ * Load a PTE into the hash table, if possible.
+ * The address is in r4, and r3 contains an access flag:
+ * _PAGE_RW (0x400) if a write.
+ * r9 contains the SRR1 value, from which we use the MSR_PR bit.
+ * SPRG3 contains the physical address of the current task's thread.
+ *
+ * Returns to the caller if the access is illegal or there is no
+ * mapping for the address.  Otherwise it places an appropriate PTE
+ * in the hash table and returns from the exception.
+ * Uses r0, r3 - r8, ctr, lr.
+ */
+	.text
+_GLOBAL(hash_page)
+#ifdef CONFIG_PPC64BRIDGE
+	mfmsr	r0
+	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
+	MTMSRD(r0)
+	isync
+#endif
+	tophys(r7,0)			/* gets -KERNELBASE into r7 */
+#ifdef CONFIG_SMP
+	addis	r8,r7,mmu_hash_lock@h
+	ori	r8,r8,mmu_hash_lock@l
+	lis	r0,0x0fff
+	b	10f
+11:	lwz	r6,0(r8)
+	cmpwi	0,r6,0
+	bne	11b
+10:	lwarx	r6,0,r8
+	cmpwi	0,r6,0
+	bne-	11b
+	stwcx.	r0,0,r8
+	bne-	10b
+	isync
+#endif
+	/* Get PTE (linux-style) and check access */
+	lis	r0,KERNELBASE@h		/* check if kernel address */
+	cmplw	0,r4,r0
+	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
+	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
+	lwz	r5,PGDIR(r8)		/* virt page-table root */
+	blt+	112f			/* assume user more likely */
+	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
+	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
+	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
+112:	add	r5,r5,r7		/* convert to phys addr */
+	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
+	lwz	r8,0(r5)		/* get pmd entry */
+	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#ifdef CONFIG_SMP
+	beq-	hash_page_out		/* return if no mapping */
+#else
+	/* XXX it seems like the 601 will give a machine fault on the
+	   rfi if its alignment is wrong (bottom 4 bits of address are
+	   8 or 0xc) and we have had a not-taken conditional branch
+	   to the address following the rfi. */
+	beqlr-
+#endif
+	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
+	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
+
+	/*
+	 * Update the linux PTE atomically.  We do the lwarx up-front
+	 * because almost always, there won't be a permission violation
+	 * and there won't already be an HPTE, and thus we will have
+	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+	 */
+retry:
+	lwarx	r6,0,r8			/* get linux-style pte */
+	andc.	r5,r3,r6		/* check access & ~permission */
+#ifdef CONFIG_SMP
+	bne-	hash_page_out		/* return if access not permitted */
+#else
+	bnelr-
+#endif
+	or	r5,r0,r6		/* set accessed/dirty bits */
+	stwcx.	r5,0,r8			/* attempt to update PTE */
+	bne-	retry			/* retry if someone got there first */
+
+	mfsrin	r3,r4			/* get segment reg for segment */
+	mfctr	r0
+	stw	r0,_CTR(r11)
+	bl	create_hpte		/* add the hash table entry */
+
+#ifdef CONFIG_SMP
+	eieio
+	addis	r8,r7,mmu_hash_lock@ha
+	li	r0,0
+	stw	r0,mmu_hash_lock@l(r8)
+#endif
+
+	/* Return from the exception */
+	lwz	r5,_CTR(r11)
+	mtctr	r5
+	lwz	r0,GPR0(r11)
+	lwz	r7,GPR7(r11)
+	lwz	r8,GPR8(r11)
+	b	fast_exception_return
+
+#ifdef CONFIG_SMP
+hash_page_out:
+	eieio
+	addis	r8,r7,mmu_hash_lock@ha
+	li	r0,0
+	stw	r0,mmu_hash_lock@l(r8)
+	blr
+#endif /* CONFIG_SMP */
+
+/*
+ * Add an entry for a particular page to the hash table.
+ *
+ * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
+ *
+ * We assume any necessary modifications to the pte (e.g. setting
+ * the accessed bit) have already been done and that there is actually
+ * a hash table in use (i.e. we're not on a 603).
+ */
+_GLOBAL(add_hash_page)
+	mflr	r0
+	stw	r0,4(r1)
+
+	/* Convert context and va to VSID */
+	mulli	r3,r3,897*16		/* multiply context by context skew */
+	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
+	mulli	r0,r0,0x111		/* multiply by ESID skew */
+	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
+
+#ifdef CONFIG_SMP
+	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
+	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
+	oris	r8,r8,12
+#endif /* CONFIG_SMP */
+
+	/*
+	 * We disable interrupts here, even on UP, because we don't
+	 * want to race with hash_page, and because we want the
+	 * _PAGE_HASHPTE bit to be a reliable indication of whether
+	 * the HPTE exists (or at least whether one did once).
+	 * We also turn off the MMU for data accesses so that
+	 * we can't take a hash table miss (assuming the code is
+	 * covered by a BAT).  -- paulus
+	 */
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+
+	tophys(r7,0)
+
+#ifdef CONFIG_SMP
+	addis	r9,r7,mmu_hash_lock@ha
+	addi	r9,r9,mmu_hash_lock@l
+10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	cmpi	0,r0,0
+	bne-	11f
+	stwcx.	r8,0,r9
+	beq+	12f
+11:	lwz	r0,0(r9)
+	cmpi	0,r0,0
+	beq	10b
+	b	11b
+12:	isync
+#endif
+
+	/*
+	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
+	 * If _PAGE_HASHPTE was already set, we don't replace the existing
+	 * HPTE, so we just unlock and return.
+	 */
+	mr	r8,r5
+	rlwimi	r8,r4,22,20,29
+1:	lwarx	r6,0,r8
+	andi.	r0,r6,_PAGE_HASHPTE
+	bne	9f			/* if HASHPTE already set, done */
+	ori	r5,r6,_PAGE_HASHPTE
+	stwcx.	r5,0,r8
+	bne-	1b
+
+	bl	create_hpte
+
+9:
+#ifdef CONFIG_SMP
+	eieio
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+#endif
+
+	/* reenable interrupts and DR */
+	mtmsr	r10
+	SYNC_601
+	isync
+
+	lwz	r0,4(r1)
+	mtlr	r0
+	blr
+
+/*
+ * This routine adds a hardware PTE to the hash table.
+ * It is designed to be called with the MMU either on or off.
+ * r3 contains the VSID, r4 contains the virtual address,
+ * r5 contains the linux PTE, r6 contains the old value of the
+ * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
+ * offset to be added to addresses (0 if the MMU is on,
+ * -KERNELBASE if it is off).
+ * On SMP, the caller should have the mmu_hash_lock held.
+ * We assume that the caller has (or will) set the _PAGE_HASHPTE
+ * bit in the linux PTE in memory.  The value passed in r6 should
+ * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
+ * this routine will skip the search for an existing HPTE.
+ * This procedure modifies r0, r3 - r6, r8, cr0.
+ *  -- paulus.
+ *
+ * For speed, 4 of the instructions get patched once the size and
+ * physical address of the hash table are known.  These definitions
+ * of Hash_base and Hash_bits below are just an example.
+ */
+Hash_base = 0xc0180000
+Hash_bits = 12				/* e.g. 256kB hash table */
+Hash_msk = (((1 << Hash_bits) - 1) * 64)
+
+#ifndef CONFIG_PPC64BRIDGE
+/* defines for the PTE format for 32-bit PPCs */
+#define PTE_SIZE	8
+#define PTEG_SIZE	64
+#define LG_PTEG_SIZE	6
+#define LDPTEu		lwzu
+#define STPTE		stw
+#define CMPPTE		cmpw
+#define PTE_H		0x40
+#define PTE_V		0x80000000
+#define TST_V(r)	rlwinm. r,r,0,0,0
+#define SET_V(r)	oris r,r,PTE_V@h
+#define CLR_V(r,t)	rlwinm r,r,0,1,31
+
+#else
+/* defines for the PTE format for 64-bit PPCs */
+#define PTE_SIZE	16
+#define PTEG_SIZE	128
+#define LG_PTEG_SIZE	7
+#define LDPTEu		ldu
+#define STPTE		std
+#define CMPPTE		cmpd
+#define PTE_H		2
+#define PTE_V		1
+#define TST_V(r)	andi. r,r,PTE_V
+#define SET_V(r)	ori r,r,PTE_V
+#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
+#endif /* CONFIG_PPC64BRIDGE */
+
+#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
+#define HASH_RIGHT	31-LG_PTEG_SIZE
+
+_GLOBAL(create_hpte)
+	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	and	r8,r8,r0		/* writable if _RW & _DIRTY */
+	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r8,r8,0xe14		/* clear out reserved bits and M */
+	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
+BEGIN_FTR_SECTION
+	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+
+	/* Construct the high word of the PPC-style PTE (r5) */
+#ifndef CONFIG_PPC64BRIDGE
+	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
+	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
+#else /* CONFIG_PPC64BRIDGE */
+	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
+	sldi	r5,r3,12		/* shift vsid into position */
+	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
+#endif /* CONFIG_PPC64BRIDGE */
+	SET_V(r5)			/* set V (valid) bit */
+
+	/* Get the address of the primary PTE group in the hash table (r3) */
+_GLOBAL(hash_page_patch_A)
+	addis	r0,r7,Hash_base@h	/* base address of hash table */
+	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+	xor	r3,r3,r0		/* make primary hash */
+	li	r0,8			/* PTEs/group */
+
+	/*
+	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
+	 * if it is clear, meaning that the HPTE isn't there already...
+	 */
+	andi.	r6,r6,_PAGE_HASHPTE
+	beq+	10f			/* no PTE: go look for an empty slot */
+	tlbie	r4
+
+	addis	r4,r7,htab_hash_searches@ha
+	lwz	r6,htab_hash_searches@l(r4)
+	addi	r6,r6,1			/* count how many searches we do */
+	stw	r6,htab_hash_searches@l(r4)
+
+	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+	mtctr	r0
+	addi	r4,r3,-PTE_SIZE
+1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	CMPPTE	0,r6,r5
+	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
+	beq+	found_slot
+
+	/* Search the secondary PTEG for a matching PTE */
+	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
+_GLOBAL(hash_page_patch_B)
+	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
+	xori	r4,r4,(-PTEG_SIZE & 0xffff)
+	addi	r4,r4,-PTE_SIZE
+	mtctr	r0
+2:	LDPTEu	r6,PTE_SIZE(r4)
+	CMPPTE	0,r6,r5
+	bdnzf	2,2b
+	beq+	found_slot
+	xori	r5,r5,PTE_H		/* clear H bit again */
+
+	/* Search the primary PTEG for an empty slot */
+10:	mtctr	r0
+	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
+1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	TST_V(r6)			/* test valid bit */
+	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
+	beq+	found_empty
+
+	/* update counter of times that the primary PTEG is full */
+	addis	r4,r7,primary_pteg_full@ha
+	lwz	r6,primary_pteg_full@l(r4)
+	addi	r6,r6,1
+	stw	r6,primary_pteg_full@l(r4)
+
+	/* Search the secondary PTEG for an empty slot */
+	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
+_GLOBAL(hash_page_patch_C)
+	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
+	xori	r4,r4,(-PTEG_SIZE & 0xffff)
+	addi	r4,r4,-PTE_SIZE
+	mtctr	r0
+2:	LDPTEu	r6,PTE_SIZE(r4)
+	TST_V(r6)
+	bdnzf	2,2b
+	beq+	found_empty
+	xori	r5,r5,PTE_H		/* clear H bit again */
+
+	/*
+	 * Choose an arbitrary slot in the primary PTEG to overwrite.
+	 * Since both the primary and secondary PTEGs are full, and we
+	 * have no information that the PTEs in the primary PTEG are
+	 * more important or useful than those in the secondary PTEG,
+	 * and we know there is a definite (although small) speed
+	 * advantage to putting the PTE in the primary PTEG, we always
+	 * put the PTE in the primary PTEG.
+	 */
+	addis	r4,r7,next_slot@ha
+	lwz	r6,next_slot@l(r4)
+	addi	r6,r6,PTE_SIZE
+	andi.	r6,r6,7*PTE_SIZE
+	stw	r6,next_slot@l(r4)
+	add	r4,r3,r6
+
+#ifndef CONFIG_SMP
+	/* Store PTE in PTEG */
+found_empty:
+	STPTE	r5,0(r4)
+found_slot:
+	STPTE	r8,PTE_SIZE/2(r4)
+
+#else /* CONFIG_SMP */
+/*
+ * Between the tlbie above and updating the hash table entry below,
+ * another CPU could read the hash table entry and put it in its TLB.
+ * There are 3 cases:
+ * 1. using an empty slot
+ * 2. updating an earlier entry to change permissions (i.e. enable write)
+ * 3. taking over the PTE for an unrelated address
+ *
+ * In each case it doesn't really matter if the other CPUs have the old
+ * PTE in their TLB.  So we don't need to bother with another tlbie here,
+ * which is convenient as we've overwritten the register that had the
+ * address. :-)  The tlbie above is mainly to make sure that this CPU comes
+ * and gets the new PTE from the hash table.
+ *
+ * We do however have to make sure that the PTE is never in an invalid
+ * state with the V bit set.
+ */
+found_empty:
+found_slot:
+	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
+	STPTE	r5,0(r4)
+	sync
+	TLBSYNC
+	STPTE	r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
+	sync
+	SET_V(r5)
+	STPTE	r5,0(r4)	/* finally set V bit in PTE */
+#endif /* CONFIG_SMP */
+
+	sync		/* make sure pte updates get to memory */
+	blr
+
+	.comm	next_slot,4
+	.comm	primary_pteg_full,4
+	.comm	htab_hash_searches,4
+
+/*
+ * Flush the entry for a particular page from the hash table.
+ *
+ * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
+ *		    int count)
+ *
+ * We assume that there is a hash table in use (Hash != 0).
+ */
+_GLOBAL(flush_hash_pages)
+	tophys(r7,0)
+
+	/*
+	 * We disable interrupts here, even on UP, because we want
+	 * the _PAGE_HASHPTE bit to be a reliable indication of
+	 * whether the HPTE exists (or at least whether one did once).
+	 * We also turn off the MMU for data accesses so that
+	 * we can't take a hash table miss (assuming the code is
+	 * covered by a BAT).  -- paulus
+	 */
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+
+	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+	rlwimi	r5,r4,22,20,29
+1:	lwz	r0,0(r5)
+	cmpwi	cr1,r6,1
+	andi.	r0,r0,_PAGE_HASHPTE
+	bne	2f
+	ble	cr1,19f
+	addi	r4,r4,0x1000
+	addi	r5,r5,4
+	addi	r6,r6,-1
+	b	1b
+
+	/* Convert context and va to VSID */
+2:	mulli	r3,r3,897*16		/* multiply context by context skew */
+	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
+	mulli	r0,r0,0x111		/* multiply by ESID skew */
+	add	r3,r3,r0		/* note code below trims to 24 bits */
+
+	/* Construct the high word of the PPC-style PTE (r11) */
+#ifndef CONFIG_PPC64BRIDGE
+	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
+	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
+#else /* CONFIG_PPC64BRIDGE */
+	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
+	sldi	r11,r3,12		/* shift vsid into position */
+	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
+#endif /* CONFIG_PPC64BRIDGE */
+	SET_V(r11)			/* set V (valid) bit */
+
+#ifdef CONFIG_SMP
+	addis	r9,r7,mmu_hash_lock@ha
+	addi	r9,r9,mmu_hash_lock@l
+	rlwinm	r8,r1,0,0,18
+	add	r8,r8,r7
+	lwz	r8,TI_CPU(r8)
+	oris	r8,r8,9
+10:	lwarx	r0,0,r9
+	cmpi	0,r0,0
+	bne-	11f
+	stwcx.	r8,0,r9
+	beq+	12f
+11:	lwz	r0,0(r9)
+	cmpi	0,r0,0
+	beq	10b
+	b	11b
+12:	isync
+#endif
+
+	/*
+	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
+	 * already clear, we're done (for this pte).  If not,
+	 * clear it (atomically) and proceed.  -- paulus.
+	 */
+33:	lwarx	r8,0,r5			/* fetch the pte */
+	andi.	r0,r8,_PAGE_HASHPTE
+	beq	8f			/* done if HASHPTE is already clear */
+	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
+	stwcx.	r8,0,r5			/* update the pte */
+	bne-	33b
+
+	/* Get the address of the primary PTE group in the hash table (r3) */
+_GLOBAL(flush_hash_patch_A)
+	addis	r8,r7,Hash_base@h	/* base address of hash table */
+	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
+	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+	xor	r8,r0,r8		/* make primary hash */
+
+	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+	li	r0,8			/* PTEs/group */
+	mtctr	r0
+	addi	r12,r8,-PTE_SIZE
+1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
+	CMPPTE	0,r0,r11
+	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
+	beq+	3f
+
+	/* Search the secondary PTEG for a matching PTE */
+	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
+	li	r0,8			/* PTEs/group */
+_GLOBAL(flush_hash_patch_B)
+	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
+	xori	r12,r12,(-PTEG_SIZE & 0xffff)
+	addi	r12,r12,-PTE_SIZE
+	mtctr	r0
+2:	LDPTEu	r0,PTE_SIZE(r12)
+	CMPPTE	0,r0,r11
+	bdnzf	2,2b
+	xori	r11,r11,PTE_H		/* clear H again */
+	bne-	4f			/* should rarely fail to find it */
+
+3:	li	r0,0
+	STPTE	r0,0(r12)		/* invalidate entry */
+4:	sync
+	tlbie	r4			/* in hw tlb too */
+	sync
+
+8:	ble	cr1,9f			/* if all ptes checked */
+81:	addi	r6,r6,-1
+	addi	r5,r5,4			/* advance to next pte */
+	addi	r4,r4,0x1000
+	lwz	r0,0(r5)		/* check next pte */
+	cmpwi	cr1,r6,1
+	andi.	r0,r0,_PAGE_HASHPTE
+	bne	33b
+	bgt	cr1,81b
+
+9:
+#ifdef CONFIG_SMP
+	TLBSYNC
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+#endif
+
+19:	mtmsr	r10
+	SYNC_601
+	isync
+	blr
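
For readers of the patched sequences above (hash_page_patch_A/B, flush_hash_patch_A/B), a C sketch of the primary and secondary PTEG address computation on the 32-bit hash MMU; the helper name and parameters are hypothetical, and this is an illustration rather than the exact bit layout:

#include <stdio.h>

/* Hypothetical model: primary hash = low VSID bits XOR page index,
 * masked to the table size; the secondary hash is its ones-complement. */
static unsigned long pteg_addr(unsigned long hash_base, unsigned long hash_mask,
			       unsigned long vsid, unsigned long va, int secondary)
{
	unsigned long hash = (vsid ^ ((va >> 12) & 0xffff)) & hash_mask;

	if (secondary)
		hash = ~hash & hash_mask;
	return hash_base + hash * 64;	/* 64 bytes per PTEG on 32-bit */
}

int main(void)
{
	unsigned long mask = (1UL << 12) - 1;	/* Hash_bits = 12 example */

	printf("primary=0x%lx secondary=0x%lx\n",
	       pteg_addr(0, mask, 0x123456, 0xc0001000, 0),
	       pteg_addr(0, mask, 0x123456, 0xc0001000, 1));
	return 0;
}
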
diff --git a/arch/powerpc/mm/init.c b/arch/powerpc/mm/init.c
new file mode 100644
index 0000000..3a81ef1
--- /dev/null
+++ b/arch/powerpc/mm/init.c
@@ -0,0 +1,581 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/initrd.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/btext.h>
+#include <asm/tlb.h>
+#include <asm/bootinfo.h>
+#include <asm/prom.h>
+
+#include "mem_pieces.h"
+#include "mmu_decl.h"
+
+#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
+/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
+#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
+#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
+#endif
+#endif
+#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+unsigned long total_memory;
+unsigned long total_lowmem;
+
+unsigned long ppc_memstart;
+unsigned long ppc_memoffset = PAGE_OFFSET;
+
+int mem_init_done;
+int init_bootmem_done;
+int boot_mapsize;
+#ifdef CONFIG_PPC_PMAC
+unsigned long agp_special_page;
+#endif
+
+extern char _end[];
+extern char etext[], _stext[];
+extern char __init_begin, __init_end;
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+#endif
+
+void MMU_init(void);
+void set_phys_avail(unsigned long total_ram);
+
+/* XXX should be in current.h  -- paulus */
+extern struct task_struct *current_set[NR_CPUS];
+
+char *klimit = _end;
+struct mem_pieces phys_avail;
+struct device_node *memory_node;
+
+/*
+ * this tells the system to map all of ram with the segregs
+ * (i.e. page tables) instead of the bats.
+ * -- Cort
+ */
+int __map_without_bats;
+int __map_without_ltlbs;
+
+/* max amount of RAM to use */
+unsigned long __max_memory;
+/* max amount of low RAM to map in */
+unsigned long __max_low_memory = MAX_LOW_MEM;
+
+/*
+ * Read in a property describing some pieces of memory.
+ */
+static int __init get_mem_prop(char *name, struct mem_pieces *mp)
+{
+	struct reg_property *rp;
+	int i, s;
+	unsigned int *ip;
+	int nac = prom_n_addr_cells(memory_node);
+	int nsc = prom_n_size_cells(memory_node);
+
+	ip = (unsigned int *) get_property(memory_node, name, &s);
+	if (ip == NULL) {
+		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
+		       name);
+		return 0;
+	}
+	s /= (nsc + nac) * 4;
+	rp = mp->regions;
+	for (i = 0; i < s; ++i, ip += nac+nsc) {
+		if (nac >= 2 && ip[nac-2] != 0)
+			continue;
+		rp->address = ip[nac-1];
+		if (nsc >= 2 && ip[nac+nsc-2] != 0)
+			rp->size = ~0U;
+		else
+			rp->size = ip[nac+nsc-1];
+		++rp;
+	}
+	mp->n_regions = rp - mp->regions;
+
+	/* Make sure the pieces are sorted. */
+	mem_pieces_sort(mp);
+	mem_pieces_coalesce(mp);
+	return 1;
+}
+
+/*
+ * Collect information about physical RAM and which pieces are
+ * already in use from the device tree.
+ */
+unsigned long __init find_end_of_memory(void)
+{
+	unsigned long a, total;
+	struct mem_pieces phys_mem;
+
+	/*
+	 * Find out where physical memory is, and check that it
+	 * starts at 0 and is contiguous.  It seems that RAM is
+	 * always physically contiguous on Power Macintoshes.
+	 *
+	 * Supporting discontiguous physical memory isn't hard,
+	 * it just makes the virtual <-> physical mapping functions
+	 * more complicated (or else you end up wasting space
+	 * in mem_map).
+	 */
+	memory_node = find_devices("memory");
+	if (memory_node == NULL || !get_mem_prop("reg", &phys_mem)
+	    || phys_mem.n_regions == 0)
+		panic("No RAM??");
+	a = phys_mem.regions[0].address;
+	if (a != 0)
+		panic("RAM doesn't start at physical address 0");
+	total = phys_mem.regions[0].size;
+
+	if (phys_mem.n_regions > 1) {
+		printk("RAM starting at 0x%x is not contiguous\n",
+		       phys_mem.regions[1].address);
+		printk("Using RAM from 0 to 0x%lx\n", total-1);
+	}
+
+	return total;
+}
+
+/*
+ * Check for command-line options that affect what MMU_init will do.
+ */
+void MMU_setup(void)
+{
+	/* Check for nobats option (used in mapin_ram). */
+	if (strstr(cmd_line, "nobats")) {
+		__map_without_bats = 1;
+	}
+
+	if (strstr(cmd_line, "noltlbs")) {
+		__map_without_ltlbs = 1;
+	}
+
+	/* Look for mem= option on command line */
+	if (strstr(cmd_line, "mem=")) {
+		char *p, *q;
+		unsigned long maxmem = 0;
+
+		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
+			q = p + 4;
+			if (p > cmd_line && p[-1] != ' ')
+				continue;
+			maxmem = simple_strtoul(q, &q, 0);
+			if (*q == 'k' || *q == 'K') {
+				maxmem <<= 10;
+				++q;
+			} else if (*q == 'm' || *q == 'M') {
+				maxmem <<= 20;
+				++q;
+			}
+		}
+		__max_memory = maxmem;
+	}
+}
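
A simplified userspace rendering (illustrative only; it omits the check that mem= is preceded by a space) of the mem= parsing above, where the last mem= on the command line wins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *cmd_line = "root=/dev/hda2 mem=64M console=ttyS0";
	const char *p = cmd_line;
	unsigned long maxmem = 0;
	char *q;

	while ((p = strstr(p, "mem=")) != NULL) {
		maxmem = strtoul(p + 4, &q, 0);		/* strtoul ~ simple_strtoul */
		if (*q == 'k' || *q == 'K')
			maxmem <<= 10;
		else if (*q == 'm' || *q == 'M')
			maxmem <<= 20;
		p += 4;
	}
	/* prints: __max_memory = 67108864 (64MB) */
	printf("__max_memory = %lu (%luMB)\n", maxmem, maxmem >> 20);
	return 0;
}
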
+
+/*
+ * MMU_init sets up the basic memory mappings for the kernel,
+ * including both RAM and possibly some I/O regions,
+ * and sets up the page tables and the MMU hardware ready to go.
+ */
+void __init MMU_init(void)
+{
+	if (ppc_md.progress)
+		ppc_md.progress("MMU:enter", 0x111);
+
+	/* parse args from command line */
+	MMU_setup();
+
+	/*
+	 * Figure out how much memory we have, how much
+	 * is lowmem, and how much is highmem.  If we were
+	 * passed the total memory size from the bootloader,
+	 * just use it.
+	 */
+	if (boot_mem_size)
+		total_memory = boot_mem_size;
+	else
+		total_memory = find_end_of_memory();
+
+	if (__max_memory && total_memory > __max_memory)
+		total_memory = __max_memory;
+	total_lowmem = total_memory;
+#ifdef CONFIG_FSL_BOOKE
+	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
+	 * entries, so we need to adjust lowmem to match the amount we can map
+	 * in the fixed entries */
+	adjust_total_lowmem();
+#endif /* CONFIG_FSL_BOOKE */
+	if (total_lowmem > __max_low_memory) {
+		total_lowmem = __max_low_memory;
+#ifndef CONFIG_HIGHMEM
+		total_memory = total_lowmem;
+#endif /* CONFIG_HIGHMEM */
+	}
+	set_phys_avail(total_lowmem);
+
+	/* Initialize the MMU hardware */
+	if (ppc_md.progress)
+		ppc_md.progress("MMU:hw init", 0x300);
+	MMU_init_hw();
+
+	/* Map in all of RAM starting at KERNELBASE */
+	if (ppc_md.progress)
+		ppc_md.progress("MMU:mapin", 0x301);
+	mapin_ram();
+
+#ifdef CONFIG_HIGHMEM
+	ioremap_base = PKMAP_BASE;
+#else
+	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
+#endif /* CONFIG_HIGHMEM */
+	ioremap_bot = ioremap_base;
+
+	/* Map in I/O resources */
+	if (ppc_md.progress)
+		ppc_md.progress("MMU:setio", 0x302);
+	if (ppc_md.setup_io_mappings)
+		ppc_md.setup_io_mappings();
+
+	/* Initialize the context management stuff */
+	mmu_context_init();
+
+	if (ppc_md.progress)
+		ppc_md.progress("MMU:exit", 0x211);
+
+#ifdef CONFIG_BOOTX_TEXT
+	/* By default, we are no longer mapped */
+	boot_text_mapped = 0;
+	/* Must be done last, or ppc_md.progress will die. */
+	map_boot_text();
+#endif
+}
+
+/* This is only called until mem_init is done. */
+void __init *early_get_page(void)
+{
+	void *p;
+
+	if (init_bootmem_done) {
+		p = alloc_bootmem_pages(PAGE_SIZE);
+	} else {
+		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
+	}
+	return p;
+}
+
+/* Free up now-unused memory */
+static void free_sec(unsigned long start, unsigned long end, const char *name)
+{
+	unsigned long cnt = 0;
+
+	while (start < end) {
+		ClearPageReserved(virt_to_page(start));
+		set_page_count(virt_to_page(start), 1);
+		free_page(start);
+		cnt++;
+		start += PAGE_SIZE;
+	}
+	if (cnt) {
+		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
+		totalram_pages += cnt;
+	}
+}
+
+void free_initmem(void)
+{
+#define FREESEC(TYPE) \
+	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
+		 (unsigned long)(&__ ## TYPE ## _end), \
+		 #TYPE);
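+	/* e.g. FREESEC(init) frees [__init_begin, __init_end) and
+	 * prints something like " 123k init". */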
+
+	printk("Freeing unused kernel memory:");
+	FREESEC(init);
+	printk("\n");
+	ppc_md.progress = NULL;
+#undef FREESEC
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start < end)
+		printk("Freeing initrd memory: %ldk freed\n",
+		       (end - start) >> 10);
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		set_page_count(virt_to_page(start), 1);
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
+/*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.
+ */
+void __init do_init_bootmem(void)
+{
+	unsigned long start, size;
+	int i;
+
+	/*
+	 * Find an area to use for the bootmem bitmap.
+	 * We look for the first area which is at least
+	 * 128kB in length (128kB is enough for a bitmap
+	 * for 4GB of memory, using 4kB pages), plus 1 page
+	 * (in case the address isn't page-aligned).
+	 */
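+	/* Worked example: 4GB / 4kB = 2^20 pages, so the bitmap is
+	 * 2^20 bits = 128kB = 32 pages; plus one page for alignment,
+	 * which gives the 33 * PAGE_SIZE test below. */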
+	start = 0;
+	size = 0;
+	for (i = 0; i < phys_avail.n_regions; ++i) {
+		unsigned long a = phys_avail.regions[i].address;
+		unsigned long s = phys_avail.regions[i].size;
+		if (s <= size)
+			continue;
+		start = a;
+		size = s;
+		if (s >= 33 * PAGE_SIZE)
+			break;
+	}
+	start = PAGE_ALIGN(start);
+
+	min_low_pfn = start >> PAGE_SHIFT;
+	max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
+	max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
+	boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
+					 PPC_MEMSTART >> PAGE_SHIFT,
+					 max_low_pfn);
+
+	/* remove the bootmem bitmap from the available memory */
+	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
+
+	/* add everything in phys_avail into the bootmem map */
+	for (i = 0; i < phys_avail.n_regions; ++i)
+		free_bootmem(phys_avail.regions[i].address,
+			     phys_avail.regions[i].size);
+
+	init_bootmem_done = 1;
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES], i;
+
+#ifdef CONFIG_HIGHMEM
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = pte_offset_kernel(
+		pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
+	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
+	kmap_pte = pte_offset_kernel(
+		pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
+		KMAP_FIX_BEGIN);
+	kmap_prot = PAGE_KERNEL;
+#endif /* CONFIG_HIGHMEM */
+
+	/*
+	 * All pages are DMA-able so we put them all in the DMA zone.
+	 */
+	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+	for (i = 1; i < MAX_NR_ZONES; i++)
+		zones_size[i] = 0;
+
+#ifdef CONFIG_HIGHMEM
+	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
+#endif /* CONFIG_HIGHMEM */
+
+	free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+	unsigned long addr;
+	int codepages = 0;
+	int datapages = 0;
+	int initpages = 0;
+#ifdef CONFIG_HIGHMEM
+	unsigned long highmem_mapnr;
+
+	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+#endif /* CONFIG_HIGHMEM */
+	max_mapnr = total_memory >> PAGE_SHIFT;
+
+	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
+	num_physpages = max_mapnr;	/* RAM is assumed contiguous */
+
+	totalram_pages += free_all_bootmem();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* if we are booted from BootX with an initial ramdisk,
+	   make sure the ramdisk pages aren't reserved. */
+	if (initrd_start) {
+		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
+			ClearPageReserved(virt_to_page(addr));
+	}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+#ifdef CONFIG_PPC_OF
+	/* mark the RTAS pages as reserved */
+	if (rtas_data)
+		for (addr = (ulong)__va(rtas_data);
+		     addr < PAGE_ALIGN((ulong)__va(rtas_data) + rtas_size);
+		     addr += PAGE_SIZE)
+			SetPageReserved(virt_to_page(addr));
+#endif
+#ifdef CONFIG_PPC_PMAC
+	if (agp_special_page)
+		SetPageReserved(virt_to_page(agp_special_page));
+#endif
+	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
+	     addr += PAGE_SIZE) {
+		if (!PageReserved(virt_to_page(addr)))
+			continue;
+		if (addr < (ulong) etext)
+			codepages++;
+		else if (addr >= (unsigned long)&__init_begin
+			 && addr < (unsigned long)&__init_end)
+			initpages++;
+		else if (addr < (ulong) klimit)
+			datapages++;
+	}
+
+#ifdef CONFIG_HIGHMEM
+	{
+		unsigned long pfn;
+
+		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+			struct page *page = mem_map + pfn;
+
+			ClearPageReserved(page);
+			set_page_count(page, 1);
+			__free_page(page);
+			totalhigh_pages++;
+		}
+		totalram_pages += totalhigh_pages;
+	}
+#endif /* CONFIG_HIGHMEM */
+
+	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
+	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+	       codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
+	       initpages << (PAGE_SHIFT-10),
+	       (unsigned long)(totalhigh_pages << (PAGE_SHIFT-10)));
+
+#ifdef CONFIG_PPC_PMAC
+	if (agp_special_page)
+		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
+#endif
+
+	mem_init_done = 1;
+}
+
+/*
+ * Set phys_avail to the amount of physical memory,
+ * less the kernel text/data/bss.
+ */
+void __init
+set_phys_avail(unsigned long total_memory)
+{
+	unsigned long kstart, ksize;
+
+	/*
+	 * Initially, available physical memory is equivalent to all
+	 * physical memory.
+	 */
+
+	phys_avail.regions[0].address = PPC_MEMSTART;
+	phys_avail.regions[0].size = total_memory;
+	phys_avail.n_regions = 1;
+
+	/*
+	 * Map out the kernel text/data/bss from the available physical
+	 * memory.
+	 */
+
+	kstart = __pa(_stext);	/* should be 0 */
+	ksize = PAGE_ALIGN(klimit - _stext);
+
+	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
+	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
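+	/* Also keep the low 16kB out of phys_avail; on classic PPC the
+	 * exception vectors live at the bottom of RAM. */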
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+	/* Remove the init RAM disk from the available memory. */
+	if (initrd_start) {
+		mem_pieces_remove(&phys_avail, __pa(initrd_start),
+				  initrd_end - initrd_start, 1);
+	}
+#endif /* CONFIG_BLK_DEV_INITRD */
+#ifdef CONFIG_PPC_OF
+	/* remove the RTAS pages from the available memory */
+	if (rtas_data)
+		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
+#endif
+#ifdef CONFIG_PPC_PMAC
+	/* Because of some uninorth weirdness, we need a page of
+	 * memory as high as possible (it must be outside of the
+	 * bus address seen as the AGP aperture). It will be used
+	 * by the r128 DRM driver
+	 *
+	 * FIXME: We need to make sure that page doesn't overlap any of the
+	 * above. This could be done by improving mem_pieces_find to be able
+	 * to do a backward search from the end of the list.
+	 */
+	if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
+		agp_special_page = (total_memory - PAGE_SIZE);
+		mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
+		agp_special_page = (unsigned long)__va(agp_special_page);
+	}
+#endif /* CONFIG_PPC_PMAC */
+}
+
+/* Mark some memory as reserved by removing it from phys_avail. */
+void __init reserve_phys_mem(unsigned long start, unsigned long size)
+{
+	mem_pieces_remove(&phys_avail, start, size, 1);
+}
diff --git a/arch/powerpc/mm/init64.c b/arch/powerpc/mm/init64.c
new file mode 100644
index 0000000..81f6745
--- /dev/null
+++ b/arch/powerpc/mm/init64.c
@@ -0,0 +1,385 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/eeh.h>
+#include <asm/processor.h>
+#include <asm/mmzone.h>
+#include <asm/cputable.h>
+#include <asm/ppcdebug.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
+#include <asm/vdso.h>
+#include <asm/imalloc.h>
+
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
+int mem_init_done;
+unsigned long ioremap_bot = IMALLOC_BASE;
+static unsigned long phbs_io_bot = PHBS_IO_BASE;
+
+extern pgd_t swapper_pg_dir[];
+extern struct task_struct *current_set[NR_CPUS];
+
+unsigned long klimit = (unsigned long)_end;
+
+unsigned long _SDR1 = 0;
+unsigned long _ASR = 0;
+
+/* max amount of RAM to use */
+unsigned long __max_memory;
+
+/* info on what we think the IO hole is */
+unsigned long io_hole_start;
+unsigned long io_hole_size;
+
+/*
+ * Do very early mm setup.
+ */
+void __init mm_init_ppc64(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+	unsigned long i;
+#endif
+
+	ppc64_boot_msg(0x100, "MM Init");
+
+	/* This is the story of the IO hole... please, keep seated,
+	 * unfortunately, we are out of oxygen masks at the moment.
+	 * So we need some rough way to tell where your big IO hole
+	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
+	 * that area as well, on POWER4 we don't have one, etc...
+	 * We need that as a "hint" when sizing the TCE table on POWER3.
+	 * So far, the simplest way that seems to work well enough for us
+	 * is to just assume that the first discontinuity in our physical
+	 * RAM layout is the IO hole.  That may not be correct in the future
+	 * (and isn't on iSeries, but then we don't care ;)
+	 */
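+	/* e.g. LMB regions [0, 2G) and [4G, 6G) would yield
+	 * io_hole_start = 2G and io_hole_size = 2G. */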
+
+#ifndef CONFIG_PPC_ISERIES
+	for (i = 1; i < lmb.memory.cnt; i++) {
+		unsigned long base, prevbase, prevsize;
+
+		prevbase = lmb.memory.region[i-1].base;
+		prevsize = lmb.memory.region[i-1].size;
+		base = lmb.memory.region[i].base;
+		if (base > (prevbase + prevsize)) {
+			io_hole_start = prevbase + prevsize;
+			io_hole_size = base  - (prevbase + prevsize);
+			break;
+		}
+	}
+#endif /* CONFIG_PPC_ISERIES */
+	if (io_hole_start)
+		printk("IO Hole assumed to be %lx -> %lx\n",
+		       io_hole_start, io_hole_start + io_hole_size - 1);
+
+	ppc64_boot_msg(0x100, "MM Init Done");
+}
+
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)__init_begin;
+	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
+		memset((void *)addr, 0xcc, PAGE_SIZE);
+		ClearPageReserved(virt_to_page(addr));
+		set_page_count(virt_to_page(addr), 1);
+		free_page(addr);
+		totalram_pages++;
+	}
+	printk("Freeing unused kernel memory: %luk freed\n",
+		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start < end)
+		printk("Freeing initrd memory: %ldk freed\n",
+		       (end - start) >> 10);
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		set_page_count(virt_to_page(start), 1);
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
+/*
+ * Initialize the bootmem system and give it all the memory we
+ * have available.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+void __init do_init_bootmem(void)
+{
+	unsigned long i;
+	unsigned long start, bootmap_pages;
+	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	int boot_mapsize;
+
+	/*
+	 * Find an area to use for the bootmem bitmap.  Calculate the size of
+	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
+	 * Add 1 additional page in case the address isn't page-aligned.
+	 */
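+	/* e.g. 2GB of RAM with 4kB pages is 2^19 pages, so the bitmap
+	 * needs 2^19 bits = 64kB, i.e. 16 pages (plus rounding). */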
+	bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
+	BUG_ON(!start);
+
+	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+
+	max_pfn = max_low_pfn;
+
+	/* Add all physical memory to the bootmem map, mark each area
+	 * present.
+	 */
+	for (i=0; i < lmb.memory.cnt; i++)
+		free_bootmem(lmb.memory.region[i].base,
+			     lmb_size_bytes(&lmb.memory, i));
+
+	/* reserve the sections we're already using */
+	for (i=0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i));
+
+	for (i=0; i < lmb.memory.cnt; i++)
+		memory_present(0, lmb_start_pfn(&lmb.memory, i),
+			       lmb_end_pfn(&lmb.memory, i));
+}
+
+/*
+ * paging_init() sets up the page tables - in fact we've already done this.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long zholes_size[MAX_NR_ZONES];
+	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = lmb_end_of_DRAM();
+
+	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	       top_of_ram, total_ram);
+	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	       (top_of_ram - total_ram) >> 20);
+	/*
+	 * All pages are DMA-able so we put them all in the DMA zone.
+	 */
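+	/* zholes_size tells free_area_init_node how much of the zone's
+	 * span (top_of_ram - total_ram, i.e. the memory hole) has no
+	 * backing RAM, so the zone's page accounting stays correct. */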
+	memset(zones_size, 0, sizeof(zones_size));
+	memset(zholes_size, 0, sizeof(zholes_size));
+
+	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+
+	free_area_init_node(0, NODE_DATA(0), zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+}
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
+
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i=0; i < lmb.memory.cnt; i++) {
+		unsigned long base, size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("setup_kcore: kmalloc failed\n");
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int nid;
+#endif
+	pg_data_t *pgdat;
+	unsigned long i;
+	struct page *page;
+	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+
+	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	for_each_online_node(nid) {
+		if (NODE_DATA(nid)->node_spanned_pages != 0) {
+			printk("freeing bootmem node %x\n", nid);
+			totalram_pages +=
+				free_all_bootmem_node(NODE_DATA(nid));
+		}
+	}
+#else
+	max_mapnr = num_physpages;
+	totalram_pages += free_all_bootmem();
+#endif
+
+	for_each_pgdat(pgdat) {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			page = pgdat_page_nr(pgdat, i);
+			if (PageReserved(page))
+				reservedpages++;
+		}
+	}
+
+	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
+	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
+	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+
+	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
+	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
+		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+		num_physpages << (PAGE_SHIFT-10),
+		codesize >> 10,
+		reservedpages << (PAGE_SHIFT-10),
+		datasize >> 10,
+		bsssize >> 10,
+		initsize >> 10);
+
+	mem_init_done = 1;
+
+	/* Initialize the vDSO */
+	vdso_init();
+}
+
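+/*
+ * Simple bump allocator for PHB (PCI host bridge) I/O space: ranges
+ * are carved out of [PHBS_IO_BASE, IMALLOC_BASE) in call order and
+ * can never be freed.
+ */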
+void __iomem * reserve_phb_iospace(unsigned long size)
+{
+	void __iomem *virt_addr;
+
+	if (phbs_io_bot >= IMALLOC_BASE)
+		panic("reserve_phb_iospace(): phb io space overflow\n");
+
+	virt_addr = (void __iomem *) phbs_io_bot;
+	phbs_io_bot += size;
+
+	return virt_addr;
+}
+
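+/*
+ * Slab constructor: zero each new object so that a freshly allocated
+ * page table page starts out with all entries invalid.
+ */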
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+	memset(addr, 0, kmem_cache_size(cache));
+}
+
+static const int pgtable_cache_size[2] = {
+	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+	"pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
+void pgtable_cache_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+		int size = pgtable_cache_size[i];
+		const char *name = pgtable_cache_name[i];
+
+		pgtable_cache[i] = kmem_cache_create(name,
+						     size, size,
+						     SLAB_HWCACHE_ALIGN
+						     | SLAB_MUST_HWCACHE_ALIGN,
+						     zero_ctor,
+						     NULL);
+		if (! pgtable_cache[i])
+			panic("pgtable_cache_init(): could not create %s!\n",
+			      name);
+	}
+}
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (ppc_md.phys_mem_access_prot)
+		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+
+	if (!page_is_ram(addr >> PAGE_SHIFT))
+		vma_prot = __pgprot(pgprot_val(vma_prot)
+				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
new file mode 100644
index 0000000..345db08
--- /dev/null
+++ b/arch/powerpc/mm/mem.c
@@ -0,0 +1,299 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/initrd.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/btext.h>
+#include <asm/tlb.h>
+#include <asm/bootinfo.h>
+
+#include "mem_pieces.h"
+#include "mmu_decl.h"
+
+#ifndef CPU_FTR_COHERENT_ICACHE
+#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
+#define CPU_FTR_NOEXECUTE	0
+#endif
+
+/*
+ * This is called by /dev/mem to know if a given address has to
+ * be mapped non-cacheable or not
+ */
+int page_is_ram(unsigned long pfn)
+{
+	unsigned long paddr = (pfn << PAGE_SHIFT);
+
+#ifndef CONFIG_PPC64	/* XXX for now */
+	return paddr < __pa(high_memory);
+#else
+	int i;
+	for (i=0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+
+		base = lmb.memory.region[i].base;
+
+		if ((paddr >= base) &&
+			(paddr < (base + lmb.memory.region[i].size))) {
+			return 1;
+		}
+	}
+
+	return 0;
+#endif
+}
+EXPORT_SYMBOL(page_is_ram);
+
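+/*
+ * Decide the protection for a /dev/mem style mapping: addresses that
+ * are not RAM get a guarded, non-cacheable mapping unless the
+ * platform overrides this via ppc_md.phys_mem_access_prot.
+ */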
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (ppc_md.phys_mem_access_prot)
+		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+
+	if (!page_is_ram(addr >> PAGE_SHIFT))
+		vma_prot = __pgprot(pgprot_val(vma_prot)
+				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+
+void show_mem(void)
+{
+	unsigned long total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	unsigned long highmem = 0;
+	struct page *page;
+	pg_data_t *pgdat;
+	unsigned long i;
+
+	printk("Mem-info:\n");
+	show_free_areas();
+	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	for_each_pgdat(pgdat) {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			page = pgdat_page_nr(pgdat, i);
+			total++;
+			if (PageHighMem(page))
+				highmem++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+	}
+	printk("%ld pages of RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+	printk("%ld pages of HIGHMEM\n", highmem);
+#endif
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
+}
+
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean.  We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
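+/* PG_arch_1 is used here as a per-page "i-cache is clean" flag. */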
+void flush_dcache_page(struct page *page)
+{
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &page->flags))
+		clear_bit(PG_arch_1, &page->flags);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void flush_dcache_icache_page(struct page *page)
+{
+#ifdef CONFIG_BOOKE
+	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+	__flush_dcache_icache(start);
+	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+#elif defined(CONFIG_8xx)
+	/* On 8xx there is no need to kmap since highmem is not supported */
+	__flush_dcache_icache(page_address(page));
+#else
+	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
+#endif
+}
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+	clear_page(page);
+
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+	/*
+	 * We shouldn't have to do this, but some versions of glibc
+	 * require it (ld.so assumes zero filled pages are icache clean)
+	 * - Anton
+	 */
+
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &pg->flags))
+		clear_bit(PG_arch_1, &pg->flags);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *pg)
+{
+	copy_page(vto, vfrom);
+
+	/*
+	 * We should be able to use the following optimisation, however
+	 * there are two problems.
+	 * Firstly a bug in some versions of binutils meant PLT sections
+	 * were not marked executable.
+	 * Secondly the first word in the GOT section is blrl, used
+	 * to establish the GOT address. Until recently the GOT was
+	 * not marked executable.
+	 * - Anton
+	 */
+#if 0
+	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+		return;
+#endif
+
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &pg->flags))
+		clear_bit(PG_arch_1, &pg->flags);
+}
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len)
+{
+	unsigned long maddr;
+
+	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
+	flush_icache_range(maddr, maddr + len);
+	kunmap(page);
+}
+EXPORT_SYMBOL(flush_icache_user_range);
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux PTE.
+ * 
+ * This must always be called with the mm->page_table_lock held
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
+#ifdef CONFIG_PPC32
+	pmd_t *pmd;
+#else
+	unsigned long vsid;
+	void *pgdir;
+	pte_t *ptep;
+	int local = 0;
+	cpumask_t tmp;
+	unsigned long flags;
+#endif
+
+	/* handle i-cache coherency */
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+	    pfn_valid(pfn)) {
+		struct page *page = pfn_to_page(pfn);
+		if (!PageReserved(page)
+		    && !test_bit(PG_arch_1, &page->flags)) {
+			if (vma->vm_mm == current->active_mm) {
+#ifdef CONFIG_8xx
+			/* On 8xx, cache control instructions (particularly 
+		 	 * "dcbst" from flush_dcache_icache) fault as write 
+			 * operation if there is an unpopulated TLB entry 
+			 * for the address in question. To workaround that, 
+			 * we invalidate the TLB here, thus avoiding dcbst 
+			 * misbehaviour.
+			 */
+				_tlbie(address);
+#endif
+				__flush_dcache_icache((void *) address);
+			} else
+				flush_dcache_icache_page(page);
+			set_bit(PG_arch_1, &page->flags);
+		}
+	}
+
+#ifdef CONFIG_PPC_STD_MMU
+	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
+	if (!pte_young(pte) || address >= TASK_SIZE)
+		return;
+#ifdef CONFIG_PPC32
+	if (Hash == 0)
+		return;
+	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
+	if (!pmd_none(*pmd))
+		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
+#else
+	pgdir = vma->vm_mm->pgd;
+	if (pgdir == NULL)
+		return;
+
+	ptep = find_linux_pte(pgdir, address);
+	if (!ptep)
+		return;
+
+	vsid = get_vsid(vma->vm_mm->context.id, address);
+
+	local_irq_save(flags);
+	tmp = cpumask_of_cpu(smp_processor_id());
+	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
+		local = 1;
+
+	__hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
+		    0x300, local);
+	local_irq_restore(flags);
+#endif
+#endif
+}
diff --git a/arch/powerpc/mm/mem64.c b/arch/powerpc/mm/mem64.c
new file mode 100644
index 0000000..ef765a8
--- /dev/null
+++ b/arch/powerpc/mm/mem64.c
@@ -0,0 +1,259 @@
+/*
+ *  PowerPC version 
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/eeh.h>
+#include <asm/processor.h>
+#include <asm/mmzone.h>
+#include <asm/cputable.h>
+#include <asm/ppcdebug.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
+#include <asm/vdso.h>
+#include <asm/imalloc.h>
+
+/*
+ * This is called by /dev/mem to know if a given address has to
+ * be mapped non-cacheable or not
+ */
+int page_is_ram(unsigned long pfn)
+{
+	int i;
+	unsigned long paddr = (pfn << PAGE_SHIFT);
+
+	for (i=0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+
+		base = lmb.memory.region[i].base;
+
+		if ((paddr >= base) &&
+			(paddr < (base + lmb.memory.region[i].size))) {
+			return 1;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(page_is_ram);
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (ppc_md.phys_mem_access_prot)
+		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
+
+	if (!page_is_ram(addr >> PAGE_SHIFT))
+		vma_prot = __pgprot(pgprot_val(vma_prot)
+				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+
+void show_mem(void)
+{
+	unsigned long total = 0, reserved = 0;
+	unsigned long shared = 0, cached = 0;
+	struct page *page;
+	pg_data_t *pgdat;
+	unsigned long i;
+
+	printk("Mem-info:\n");
+	show_free_areas();
+	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	for_each_pgdat(pgdat) {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			page = pgdat_page_nr(pgdat, i);
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page) - 1;
+		}
+	}
+	printk("%ld pages of RAM\n", total);
+	printk("%ld reserved pages\n", reserved);
+	printk("%ld pages shared\n", shared);
+	printk("%ld pages swap cached\n", cached);
+}
+
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean.  We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+void flush_dcache_page(struct page *page)
+{
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &page->flags))
+		clear_bit(PG_arch_1, &page->flags);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+	clear_page(page);
+
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+	/*
+	 * We shouldn't have to do this, but some versions of glibc
+	 * require it (ld.so assumes zero filled pages are icache clean)
+	 * - Anton
+	 */
+
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &pg->flags))
+		clear_bit(PG_arch_1, &pg->flags);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *pg)
+{
+	copy_page(vto, vfrom);
+
+	/*
+	 * We should be able to use the following optimisation, however
+	 * there are two problems.
+	 * Firstly a bug in some versions of binutils meant PLT sections
+	 * were not marked executable.
+	 * Secondly the first word in the GOT section is blrl, used
+	 * to establish the GOT address. Until recently the GOT was
+	 * not marked executable.
+	 * - Anton
+	 */
+#if 0
+	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+		return;
+#endif
+
+	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		return;
+
+	/* avoid an atomic op if possible */
+	if (test_bit(PG_arch_1, &pg->flags))
+		clear_bit(PG_arch_1, &pg->flags);
+}
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len)
+{
+	unsigned long maddr;
+
+	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
+	flush_icache_range(maddr, maddr + len);
+}
+EXPORT_SYMBOL(flush_icache_user_range);
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux PTE.
+ * 
+ * This must always be called with the mm->page_table_lock held
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
+		      pte_t pte)
+{
+	unsigned long vsid;
+	void *pgdir;
+	pte_t *ptep;
+	int local = 0;
+	cpumask_t tmp;
+	unsigned long flags;
+
+	/* handle i-cache coherency */
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+			if (!PageReserved(page)
+			    && !test_bit(PG_arch_1, &page->flags)) {
+				__flush_dcache_icache(page_address(page));
+				set_bit(PG_arch_1, &page->flags);
+			}
+		}
+	}
+
+	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
+	if (!pte_young(pte))
+		return;
+
+	pgdir = vma->vm_mm->pgd;
+	if (pgdir == NULL)
+		return;
+
+	ptep = find_linux_pte(pgdir, ea);
+	if (!ptep)
+		return;
+
+	vsid = get_vsid(vma->vm_mm->context.id, ea);
+
+	local_irq_save(flags);
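+	/* If this mm is only active on the current cpu, __hash_page
+	 * can use a local (non-broadcast) tlb invalidation. */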
+	tmp = cpumask_of_cpu(smp_processor_id());
+	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
+		local = 1;
+
+	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
+		    0x300, local);
+	local_irq_restore(flags);
+}
diff --git a/arch/powerpc/mm/mem_pieces.c b/arch/powerpc/mm/mem_pieces.c
new file mode 100644
index 0000000..3d63905
--- /dev/null
+++ b/arch/powerpc/mm/mem_pieces.c
@@ -0,0 +1,163 @@
+/*
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Changes to accommodate Power Macintoshes.
+ *    Cort Dougan <cort@cs.nmt.edu>
+ *      Rewrites.
+ *    Grant Erickson <grant@lcse.umn.edu>
+ *      General rework and split from mm/init.c.
+ *
+ *    Module name: mem_pieces.c
+ *
+ *    Description:
+ *      Routines and data structures for manipulating and representing
+ *      physical memory extents (i.e. address/length pairs).
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <asm/page.h>
+
+#include "mem_pieces.h"
+
+extern struct mem_pieces phys_avail;
+
+static void mem_pieces_print(struct mem_pieces *);
+
+/*
+ * Scan a region for a piece of a given size with the required alignment.
+ */
+void __init *
+mem_pieces_find(unsigned int size, unsigned int align)
+{
+	int i;
+	unsigned a, e;
+	struct mem_pieces *mp = &phys_avail;
+
+	for (i = 0; i < mp->n_regions; ++i) {
+		a = mp->regions[i].address;
+		e = a + mp->regions[i].size;
+		a = (a + align - 1) & -align;
+		if (a + size <= e) {
+			mem_pieces_remove(mp, a, size, 1);
+			return (void *) __va(a);
+		}
+	}
+	panic("Couldn't find %u bytes at %u alignment\n", size, align);
+
+	return NULL;
+}
+
+/*
+ * Remove some memory from an array of pieces
+ */
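+/*
+ * e.g. removing [0x2000, 0x3000) from the single region [0, 0x8000)
+ * splits it into [0, 0x2000) and [0x3000, 0x8000), which is why
+ * n_regions can grow (up to MEM_PIECES_MAX).
+ */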
+void __init
+mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
+		  int must_exist)
+{
+	int i, j;
+	unsigned int end, rs, re;
+	struct reg_property *rp;
+
+	end = start + size;
+	for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
+		if (end > rp->address && start < rp->address + rp->size)
+			break;
+	}
+	if (i >= mp->n_regions) {
+		if (must_exist)
+			printk("mem_pieces_remove: [%x,%x) not in any region\n",
+			       start, end);
+		return;
+	}
+	for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
+		rs = rp->address;
+		re = rs + rp->size;
+		if (must_exist && (start < rs || end > re)) {
+			printk("mem_pieces_remove: bad overlap [%x,%x) with",
+			       start, end);
+			mem_pieces_print(mp);
+			must_exist = 0;
+		}
+		if (start > rs) {
+			rp->size = start - rs;
+			if (end < re) {
+				/* need to split this entry */
+				if (mp->n_regions >= MEM_PIECES_MAX)
+					panic("eek... mem_pieces overflow");
+				for (j = mp->n_regions; j > i + 1; --j)
+					mp->regions[j] = mp->regions[j-1];
+				++mp->n_regions;
+				rp[1].address = end;
+				rp[1].size = re - end;
+			}
+		} else {
+			if (end < re) {
+				rp->address = end;
+				rp->size = re - end;
+			} else {
+				/* need to delete this entry */
+				for (j = i; j < mp->n_regions - 1; ++j)
+					mp->regions[j] = mp->regions[j+1];
+				--mp->n_regions;
+				--i;
+				--rp;
+			}
+		}
+	}
+}
+
+static void __init
+mem_pieces_print(struct mem_pieces *mp)
+{
+	int i;
+
+	for (i = 0; i < mp->n_regions; ++i)
+		printk(" [%x, %x)", mp->regions[i].address,
+		       mp->regions[i].address + mp->regions[i].size);
+	printk("\n");
+}
+
+void __init
+mem_pieces_sort(struct mem_pieces *mp)
+{
+	unsigned long a, s;
+	int i, j;
+
+	for (i = 1; i < mp->n_regions; ++i) {
+		a = mp->regions[i].address;
+		s = mp->regions[i].size;
+		for (j = i - 1; j >= 0; --j) {
+			if (a >= mp->regions[j].address)
+				break;
+			mp->regions[j+1] = mp->regions[j];
+		}
+		mp->regions[j+1].address = a;
+		mp->regions[j+1].size = s;
+	}
+}
+
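+/*
+ * Merge adjacent or overlapping regions; the list must already be
+ * sorted by address (see mem_pieces_sort above).  e.g. [0, 0x1000)
+ * and [0x1000, 0x3000) coalesce into [0, 0x3000).
+ */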
+void __init
+mem_pieces_coalesce(struct mem_pieces *mp)
+{
+	unsigned long a, s, ns;
+	int i, j, d;
+
+	d = 0;
+	for (i = 0; i < mp->n_regions; i = j) {
+		a = mp->regions[i].address;
+		s = mp->regions[i].size;
+		for (j = i + 1; j < mp->n_regions
+			     && mp->regions[j].address - a <= s; ++j) {
+			ns = mp->regions[j].address + mp->regions[j].size - a;
+			if (ns > s)
+				s = ns;
+		}
+		mp->regions[d].address = a;
+		mp->regions[d].size = s;
+		++d;
+	}
+	mp->n_regions = d;
+}
diff --git a/arch/powerpc/mm/mem_pieces.h b/arch/powerpc/mm/mem_pieces.h
new file mode 100644
index 0000000..e2b700d
--- /dev/null
+++ b/arch/powerpc/mm/mem_pieces.h
@@ -0,0 +1,48 @@
+/*
+ *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ *      Changes to accommodate Power Macintoshes.
+ *    Cort Dougan <cort@cs.nmt.edu>
+ *      Rewrites.
+ *    Grant Erickson <grant@lcse.umn.edu>
+ *      General rework and split from mm/init.c.
+ *
+ *    Module name: mem_pieces.h
+ *
+ *    Description:
+ *      Routines and data structures for manipulating and representing
+ *      physical memory extents (i.e. address/length pairs).
+ *
+ */
+
+#ifndef __MEM_PIECES_H__
+#define	__MEM_PIECES_H__
+
+#include <asm/prom.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Type Definitions */
+
+#define	MEM_PIECES_MAX	32
+
+struct mem_pieces {
+    int n_regions;
+    struct reg_property regions[MEM_PIECES_MAX];
+};
+
+/* Function Prototypes */
+
+extern void	*mem_pieces_find(unsigned int size, unsigned int align);
+extern void	 mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
+				   unsigned int size, int must_exist);
+extern void	 mem_pieces_coalesce(struct mem_pieces *mp);
+extern void	 mem_pieces_sort(struct mem_pieces *mp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MEM_PIECES_H__ */
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
new file mode 100644
index 0000000..a8816e0
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context.c
@@ -0,0 +1,86 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+#ifdef FEW_CONTEXTS
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+void steal_context(void);
+#endif /* FEW_CONTEXTS */
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init
+mmu_context_init(void)
+{
+	/*
+	 * Some processors have too few contexts to reserve one for
+	 * init_mm, and require using context 0 for a normal task.
+	 * Other processors reserve the use of context zero for the kernel.
+	 * This code assumes FIRST_CONTEXT < 32.
+	 */
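+	/* e.g. with FIRST_CONTEXT == 1 this sets bit 0, marking
+	 * context 0 as in use from the start. */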
+	context_map[0] = (1 << FIRST_CONTEXT) - 1;
+	next_mmu_context = FIRST_CONTEXT;
+#ifdef FEW_CONTEXTS
+	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+#endif /* FEW_CONTEXTS */
+}
+
+#ifdef FEW_CONTEXTS
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP.  If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :).  This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ *  -- paulus
+ */
+void
+steal_context(void)
+{
+	struct mm_struct *mm;
+
+	/* free up context `next_mmu_context' */
+	/* if we shouldn't free context 0, don't... */
+	if (next_mmu_context < FIRST_CONTEXT)
+		next_mmu_context = FIRST_CONTEXT;
+	mm = context_mm[next_mmu_context];
+	flush_tlb_mm(mm);
+	destroy_context(mm);
+}
+#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context64.c b/arch/powerpc/mm/mmu_context64.c
new file mode 100644
index 0000000..714a84d
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context64.c
@@ -0,0 +1,63 @@
+/*
+ *  MMU context allocation for 64-bit kernels.
+ *
+ *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+
+#include <asm/mmu_context.h>
+
+static DEFINE_SPINLOCK(mmu_context_lock);
+static DEFINE_IDR(mmu_context_idr);
+
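+/*
+ * Context ids come from an IDR; allocation starts at 1, keeping id 0
+ * reserved, and -EAGAIN just means the preallocation raced with
+ * another allocator, so we retry.
+ */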
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	int index;
+	int err;
+
+again:
+	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(&mmu_context_lock);
+	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+	spin_unlock(&mmu_context_lock);
+
+	if (err == -EAGAIN)
+		goto again;
+	else if (err)
+		return err;
+
+	if (index > MAX_CONTEXT) {
+		idr_remove(&mmu_context_idr, index);
+		return -ENOMEM;
+	}
+
+	mm->context.id = index;
+
+	return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	spin_lock(&mmu_context_lock);
+	idr_remove(&mmu_context_idr, mm->context.id);
+	spin_unlock(&mmu_context_lock);
+
+	mm->context.id = NO_CONTEXT;
+}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
new file mode 100644
index 0000000..540f329
--- /dev/null
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -0,0 +1,85 @@
+/*
+ * Declarations of procedures and variables shared between files
+ * in arch/ppc/mm/.
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+
+extern void mapin_ram(void);
+extern int map_page(unsigned long va, phys_addr_t pa, int flags);
+extern void setbat(int index, unsigned long virt, unsigned long phys,
+		   unsigned int size, int flags);
+extern void reserve_phys_mem(unsigned long start, unsigned long size);
+extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+		      unsigned int size, int flags, unsigned int pid);
+extern void invalidate_tlbcam_entry(int index);
+
+extern int __map_without_bats;
+extern unsigned long ioremap_base;
+extern unsigned long ioremap_bot;
+extern unsigned int rtas_data, rtas_size;
+
+extern unsigned long total_memory;
+extern unsigned long total_lowmem;
+extern int mem_init_done;
+
+extern PTE *Hash, *Hash_end;
+extern unsigned long Hash_size, Hash_mask;
+
+extern unsigned int num_tlbcam_entries;
+
+/* ...and now those things that may be slightly different between processor
+ * architectures.  -- Dan
+ */
+#if defined(CONFIG_8xx)
+#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define MMU_init_hw()		do { } while(0)
+#define mmu_mapin_ram()		(0UL)
+
+#elif defined(CONFIG_4xx)
+#define flush_HPTE(X, va, pg)	_tlbie(va)
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+
+#elif defined(CONFIG_FSL_BOOKE)
+#define flush_HPTE(X, va, pg)	_tlbie(va)
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+extern void adjust_total_lowmem(void);
+
+#else
+/* anything except 4xx or 8xx */
+extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
+
+/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
+ * which includes all new 82xx processors.  We need tlbie/tlbsync here
+ * in that case (I think). -- Dan.
+ */
+static inline void flush_HPTE(unsigned context, unsigned long va,
+			      unsigned long pdval)
+{
+	if ((Hash != 0) &&
+	    cpu_has_feature(CPU_FTR_HPTE_TABLE))
+		flush_hash_pages(0, va, pdval, 1);
+	else
+		_tlbie(va);
+}
+#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 0000000..81a3d74
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,470 @@
+/*
+ * This file contains the routines setting up the linux page tables.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+
+#include "mmu_decl.h"
+
+unsigned long ioremap_base;
+unsigned long ioremap_bot;
+int io_bat_index;
+
+#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#define HAVE_BATS	1
+#endif
+
+#if defined(CONFIG_FSL_BOOKE)
+#define HAVE_TLBCAM	1
+#endif
+
+extern char etext[], _stext[];
+
+#ifdef CONFIG_SMP
+extern void hash_page_sync(void);
+#endif
+
+#ifdef HAVE_BATS
+extern unsigned long v_mapped_by_bats(unsigned long va);
+extern unsigned long p_mapped_by_bats(unsigned long pa);
+void setbat(int index, unsigned long virt, unsigned long phys,
+	    unsigned int size, int flags);
+
+#else /* !HAVE_BATS */
+#define v_mapped_by_bats(x)	(0UL)
+#define p_mapped_by_bats(x)	(0UL)
+#endif /* HAVE_BATS */
+
+#ifdef HAVE_TLBCAM
+extern unsigned int tlbcam_index;
+extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+#else /* !HAVE_TLBCAM */
+#define v_mapped_by_tlbcam(x)	(0UL)
+#define p_mapped_by_tlbcam(x)	(0UL)
+#endif /* HAVE_TLBCAM */
+
+#ifdef CONFIG_PTE_64BIT
+/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+#define PGDIR_ORDER	1
+#else
+#define PGDIR_ORDER	0
+#endif
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret;
+
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+	return ret;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+	free_pages((unsigned long)pgd, PGDIR_ORDER);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *pte;
+	extern int mem_init_done;
+	extern void *early_get_page(void);
+
+	if (mem_init_done) {
+		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	} else {
+		pte = (pte_t *)early_get_page();
+		if (pte)
+			clear_page(pte);
+	}
+	return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *ptepage;
+
+#ifdef CONFIG_HIGHPTE
+	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+#else
+	int flags = GFP_KERNEL | __GFP_REPEAT;
+#endif
+
+	ptepage = alloc_pages(flags, 0);
+	if (ptepage)
+		clear_highpage(ptepage);
+	return ptepage;
+}
+
+void pte_free_kernel(pte_t *pte)
+{
+#ifdef CONFIG_SMP
+	hash_page_sync();
+#endif
+	free_page((unsigned long)pte);
+}
+
+void pte_free(struct page *ptepage)
+{
+#ifdef CONFIG_SMP
+	hash_page_sync();
+#endif
+	__free_page(ptepage);
+}
+
+#ifndef CONFIG_PHYS_64BIT
+void __iomem *
+ioremap(phys_addr_t addr, unsigned long size)
+{
+	return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+#else /* CONFIG_PHYS_64BIT */
+void __iomem *
+ioremap64(unsigned long long addr, unsigned long size)
+{
+	return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+
+void __iomem *
+ioremap(phys_addr_t addr, unsigned long size)
+{
+	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
+
+	return ioremap64(addr64, size);
+}
+#endif /* CONFIG_PHYS_64BIT */
+
+void __iomem *
+__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
+{
+	unsigned long v, i;
+	phys_addr_t p;
+	int err;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before then, we use space going down from ioremap_base
+	 * (ioremap_bot records where we're up to).
+	 */
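+	/* e.g. MMU_init sets ioremap_base to 0xfe000000 on non-highmem
+	 * 32-bit configurations, so early mappings grow down from there. */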
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - p;
+
+	/*
+	 * If the address lies within the first 16 MB, assume it's in ISA
+	 * memory space
+	 */
+	if (p < 16*1024*1024)
+		p += _ISA_MEM_BASE;
+
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 */
+	if (mem_init_done && (p < virt_to_phys(high_memory))) {
+		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
+		       __builtin_return_address(0));
+		return NULL;
+	}
+
+	if (size == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped?  Perhaps overlapped by a previous
+	 * BAT mapping.  If the whole area is mapped then we're done,
+	 * otherwise remap it since we want to keep the virt addrs for
+	 * each request contiguous.
+	 *
+	 * We make the assumption here that if the bottom and top
+	 * of the range we want are mapped then it's mapped to the
+	 * same virt address (and this is contiguous).
+	 *  -- Cort
+	 */
+	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
+		goto out;
+
+	if ((v = p_mapped_by_tlbcam(p)))
+		goto out;
+
+	if (mem_init_done) {
+		struct vm_struct *area;
+		area = get_vm_area(size, VM_IOREMAP);
+		if (area == 0)
+			return NULL;
+		v = (unsigned long) area->addr;
+	} else {
+		v = (ioremap_bot -= size);
+	}
+
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= _PAGE_KERNEL;
+	if (flags & _PAGE_NO_CACHE)
+		flags |= _PAGE_GUARDED;
+
+	/*
+	 * Should check if it is a candidate for a BAT mapping
+	 */
+
+	err = 0;
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+		err = map_page(v+i, p+i, flags);
+	if (err) {
+		if (mem_init_done)
+			vunmap((void *)v);
+		return NULL;
+	}
+
+out:
+	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+	/*
+	 * If mapped by BATs then there is nothing to do.
+	 * Calling vfree() generates a benign warning.
+	 */
+	if (v_mapped_by_bats((unsigned long)addr))
+		return;
+
+	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
+}
+
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+	return (void __iomem *) (port + _IO_BASE);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+int
+map_page(unsigned long va, phys_addr_t pa, int flags)
+{
+	pmd_t *pd;
+	pte_t *pg;
+	int err = -ENOMEM;
+
+	spin_lock(&init_mm.page_table_lock);
+	/* Use upper 10 bits of VA to index the first level map */
+	pd = pmd_offset(pgd_offset_k(va), va);
+	/* Use middle 10 bits of VA to index the second-level map */
+	pg = pte_alloc_kernel(&init_mm, pd, va);
+	if (pg != 0) {
+		err = 0;
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+		if (mem_init_done)
+			flush_HPTE(0, va, pmd_val(*pd));
+	}
+	spin_unlock(&init_mm.page_table_lock);
+	return err;
+}
+
+/*
+ * Map in all of physical memory starting at KERNELBASE.
+ */
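+/*
+ * mmu_mapin_ram() returns how much lowmem the MMU-specific code has
+ * already covered with large translations (e.g. BATs on 6xx, CAM
+ * entries on Freescale BookE); only the remainder is mapped here a
+ * page at a time.
+ */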
+void __init mapin_ram(void)
+{
+	unsigned long v, p, s, f;
+
+	s = mmu_mapin_ram();
+	v = KERNELBASE + s;
+	p = PPC_MEMSTART + s;
+	for (; s < total_lowmem; s += PAGE_SIZE) {
+		if ((char *) v >= _stext && (char *) v < etext)
+			f = _PAGE_RAM_TEXT;
+		else
+			f = _PAGE_RAM;
+		map_page(v, p, f);
+		v += PAGE_SIZE;
+		p += PAGE_SIZE;
+	}
+}
+
+/* is x a power of 2? */
+#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+/* is x a power of 4? */
+#define is_power_of_4(x)	((x) != 0 && (((x) & ((x) - 1)) == 0) && (ffs(x) & 1))
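+/* A power of 4 is a power of 2 whose single set bit sits at an odd
+ * ffs() position: ffs(1) == 1, ffs(4) == 3, ffs(16) == 5, ... */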
+
+/*
+ * Set up a mapping for a block of I/O.
+ * virt, phys, size must all be page-aligned.
+ * This should only be called before ioremap is called.
+ */
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+			     unsigned int size, int flags)
+{
+	int i;
+
+	if (virt > KERNELBASE && virt < ioremap_bot)
+		ioremap_bot = ioremap_base = virt;
+
+#ifdef HAVE_BATS
+	/*
+	 * Use a BAT for this if possible...
+	 */
+	if (io_bat_index < 2 && is_power_of_2(size)
+	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+		setbat(io_bat_index, virt, phys, size, flags);
+		++io_bat_index;
+		return;
+	}
+#endif /* HAVE_BATS */
+
+#ifdef HAVE_TLBCAM
+	/*
+	 * Use a CAM for this if possible...
+	 */
+	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
+	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
+		++tlbcam_index;
+		return;
+	}
+#endif /* HAVE_TLBCAM */
+
+	/* No BATs available, put it in the page tables. */
+	for (i = 0; i < size; i += PAGE_SIZE)
+		map_page(virt + i, phys + i, flags);
+}
+
+/* Scan the real Linux page tables and return a PTE pointer for
+ * a virtual address in a context.
+ * Returns true (1) if PTE was found, zero otherwise.  The pointer to
+ * the PTE pointer is unmodified if PTE is not found.
+ */
+int
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+{
+	pgd_t	*pgd;
+	pmd_t	*pmd;
+	pte_t	*pte;
+	int	retval = 0;
+
+	pgd = pgd_offset(mm, addr & PAGE_MASK);
+	if (pgd) {
+		pmd = pmd_offset(pgd, addr & PAGE_MASK);
+		if (pmd_present(*pmd)) {
+			pte = pte_offset_map(pmd, addr & PAGE_MASK);
+			if (pte) {
+				retval = 1;
+				*ptep = pte;
+				/* XXX caller needs to do pte_unmap, yuck */
+			}
+		}
+	}
+	return retval;
+}
+
+/* Find physical address for this virtual address.  Normally used by
+ * I/O functions, but anyone can call it.
+ */
+unsigned long iopa(unsigned long addr)
+{
+	unsigned long pa;
+
+	/* I don't know why this won't work on PMacs or CHRP.  It
+	 * appears there is some bug, or there is some implicit
+	 * mapping that is not properly represented by BATs or in the
+	 * page tables... I am actively working on resolving this, but
+	 * can't hold up other stuff.  -- Dan
+	 */
+	pte_t *pte;
+	struct mm_struct *mm;
+
+	/* Check the BATs */
+	pa = v_mapped_by_bats(addr);
+	if (pa)
+		return pa;
+
+	/* Allow mapping of user addresses (within the thread)
+	 * for DMA if necessary.
+	 */
+	if (addr < TASK_SIZE)
+		mm = current->mm;
+	else
+		mm = &init_mm;
+
+	pa = 0;
+	if (get_pteptr(mm, addr, &pte)) {
+		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
+		pte_unmap(pte);
+	}
+
+	return pa;
+}
+
+/* This will find the virtual address for a physical one....
+ * Swiped from APUS, could be dangerous :-).
+ * This is only a placeholder until I really find a way to make this
+ * work.  -- Dan
+ */
+unsigned long
+mm_ptov (unsigned long paddr)
+{
+	unsigned long ret;
+#if 0
+	if (paddr < 16*1024*1024)
+		ret = ZTWO_VADDR(paddr);
+	else {
+		int i;
+
+		for (i = 0; i < kmap_chunk_count;){
+			unsigned long phys = kmap_chunks[i++];
+			unsigned long size = kmap_chunks[i++];
+			unsigned long virt = kmap_chunks[i++];
+			if (paddr >= phys
+			    && paddr < (phys + size)){
+				ret = virt + paddr - phys;
+				goto exit;
+			}
+		}
+	
+		ret = (unsigned long) __va(paddr);
+	}
+exit:
+#ifdef DEBUGPV
+	printk ("PTOV(%lx)=%lx\n", paddr, ret);
+#endif
+#else
+	ret = (unsigned long)paddr + KERNELBASE;
+#endif
+	return ret;
+}
+
diff --git a/arch/powerpc/mm/pgtable64.c b/arch/powerpc/mm/pgtable64.c
new file mode 100644
index 0000000..724f97e
--- /dev/null
+++ b/arch/powerpc/mm/pgtable64.c
@@ -0,0 +1,357 @@
+/*
+ *  This file contains ioremap and related functions for 64-bit machines.
+ *
+ *  Derived from arch/ppc64/mm/init.c
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/rtas.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/tlb.h>
+#include <asm/eeh.h>
+#include <asm/processor.h>
+#include <asm/mmzone.h>
+#include <asm/cputable.h>
+#include <asm/ppcdebug.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
+#include <asm/vdso.h>
+#include <asm/imalloc.h>
+
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
+int mem_init_done;
+unsigned long ioremap_bot = IMALLOC_BASE;
+static unsigned long phbs_io_bot = PHBS_IO_BASE;
+
+extern pgd_t swapper_pg_dir[];
+extern struct task_struct *current_set[NR_CPUS];
+
+unsigned long klimit = (unsigned long)_end;
+
+/* max amount of RAM to use */
+unsigned long __max_memory;
+
+/* info on what we think the IO hole is */
+unsigned long io_hole_start;
+unsigned long io_hole_size;
+
+#ifdef CONFIG_PPC_ISERIES
+
+void __iomem *ioremap(unsigned long addr, unsigned long size)
+{
+	return (void __iomem *)addr;
+}
+
+void __iomem *__ioremap(unsigned long addr, unsigned long size,
+			unsigned long flags)
+{
+	return (void __iomem *)addr;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+	return;
+}
+
+#else
+
+/*
+ * map_io_page currently only called by __ioremap
+ * map_io_page adds an entry to the ioremap page table
+ * and adds an entry to the HPT, possibly bolting it
+ */
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long vsid;
+
+	if (mem_init_done) {
+		spin_lock(&init_mm.page_table_lock);
+		pgdp = pgd_offset_k(ea);
+		pudp = pud_alloc(&init_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&init_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
+		ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
+		if (!ptep)
+			return -ENOMEM;
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+							  __pgprot(flags)));
+		spin_unlock(&init_mm.page_table_lock);
+	} else {
+		unsigned long va, vpn, hash, hpteg;
+
+		/*
+		 * If the mm subsystem is not fully up, we cannot create a
+		 * linux page table entry for this mapping.  Simply bolt an
+		 * entry in the hardware page table.
+		 */
+		vsid = get_kernel_vsid(ea);
+		va = (vsid << 28) | (ea & 0xFFFFFFF);
+		vpn = va >> PAGE_SHIFT;
+
+		hash = hpt_hash(vpn, 0);
+
+		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
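+
+		/*
+		 * (Illustrative note): masking the hash with htab_hash_mask
+		 * folds it into the table, and multiplying by
+		 * HPTES_PER_GROUP (8) turns the group number into the slot
+		 * index of the first entry of the primary PTE group.
+		 */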
+
+		/* Panic if a PTE group is full */
+		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+				       HPTE_V_BOLTED,
+				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+		    == -1) {
+			panic("map_io_page: could not insert mapping");
+		}
+	}
+	return 0;
+}
+
+
+static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+			    unsigned long ea, unsigned long size,
+			    unsigned long flags)
+{
+	unsigned long i;
+
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= pgprot_val(PAGE_KERNEL);
+
+	for (i = 0; i < size; i += PAGE_SIZE)
+		if (map_io_page(ea+i, pa+i, flags))
+			return NULL;
+
+	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+}
+
+
+void __iomem *
+ioremap(unsigned long addr, unsigned long size)
+{
+	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
+{
+	unsigned long pa, ea;
+	void __iomem *ret;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the imalloc system is running, we use it.
+	 * Before that, we map using addresses going
+	 * up from ioremap_bot.  imalloc will use
+	 * the addresses from ioremap_bot through
+	 * IMALLOC_END
+	 * 
+	 */
+	pa = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - pa;
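+	/*
+	 * Worked example (illustrative, assuming 4k pages): addr = 0xf0001234
+	 * with size = 0x100 gives pa = 0xf0001000 and a rounded size of
+	 * PAGE_ALIGN(0xf0001334) - 0xf0001000 = 0x1000, i.e. the whole page
+	 * containing the request gets mapped.
+	 */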
+
+	if (size == 0)
+		return NULL;
+
+	if (mem_init_done) {
+		struct vm_struct *area;
+		area = im_get_free_area(size);
+		if (area == NULL)
+			return NULL;
+		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
+	} else {
+		ea = ioremap_bot;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
+	}
+	return ret;
+}
+
+#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
+
+int __ioremap_explicit(unsigned long pa, unsigned long ea,
+		       unsigned long size, unsigned long flags)
+{
+	struct vm_struct *area;
+	void __iomem *ret;
+	
+	/* For now, require page-aligned values for pa, ea, and size */
+	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
+	    !IS_PAGE_ALIGNED(size)) {
+		printk(KERN_ERR	"unaligned value in %s\n", __FUNCTION__);
+		return 1;
+	}
+	
+	if (!mem_init_done) {
+		/* Two things to consider in this case:
+		 * 1) No records will be kept (imalloc, etc) that the region
+		 *    has been remapped
+		 * 2) It won't be easy to iounmap() the region later (because
+		 *    of 1)
+		 */
+		;
+	} else {
+		area = im_get_area(ea, size,
+			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
+		if (area == NULL) {
+			/* Expected when PHB-dlpar is in play */
+			return 1;
+		}
+		if (ea != (unsigned long) area->addr) {
+			printk(KERN_ERR "unexpected addr return from "
+			       "im_get_area\n");
+			return 1;
+		}
+	}
+	
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
+		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/*  
+ * Unmap an IO region and remove it from imalloc'd list.
+ * Access to IO memory should be serialized by driver.
+ * This code is modeled after vmalloc code - unmap_vm_area()
+ *
+ * XXX	what about calls before mem_init_done (ie python_countermeasures())
+ */
+void iounmap(volatile void __iomem *token)
+{
+	void *addr;
+
+	if (!mem_init_done)
+		return;
+	
+	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
+
+	im_free(addr);
+}
+
+static int iounmap_subset_regions(unsigned long addr, unsigned long size)
+{
+	struct vm_struct *area;
+
+	/* Check whether subsets of this region exist */
+	area = im_get_area(addr, size, IM_REGION_SUPERSET);
+	if (area == NULL)
+		return 1;
+
+	while (area) {
+		iounmap((void __iomem *) area->addr);
+		area = im_get_area(addr, size,
+				IM_REGION_SUPERSET);
+	}
+
+	return 0;
+}
+
+int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+{
+	struct vm_struct *area;
+	unsigned long addr;
+	int rc;
+	
+	addr = (unsigned long __force) start & PAGE_MASK;
+
+	/* Verify that the region either exists or is a subset of an existing
+	 * region.  In the latter case, split the parent region to create 
+	 * the exact region 
+	 */
+	area = im_get_area(addr, size, 
+			    IM_REGION_EXISTS | IM_REGION_SUBSET);
+	if (area == NULL) {
+		/* Determine whether subset regions exist.  If so, unmap */
+		rc = iounmap_subset_regions(addr, size);
+		if (rc) {
+			printk(KERN_ERR
+			       "%s() cannot unmap nonexistent range 0x%lx\n",
+ 				__FUNCTION__, addr);
+			return 1;
+		}
+	} else {
+		iounmap((void __iomem *) area->addr);
+	}
+	/*
+	 * FIXME! This can't be right:
+	iounmap(area->addr);
+	 * Maybe it should be "iounmap(area);"
+	 */
+	return 0;
+}
+
+#endif
+
+EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/ppc_mmu.c b/arch/powerpc/mm/ppc_mmu.c
new file mode 100644
index 0000000..9a381ed5
--- /dev/null
+++ b/arch/powerpc/mm/ppc_mmu.c
@@ -0,0 +1,296 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification.  This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/prom.h>
+#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+#include "mmu_decl.h"
+#include "mem_pieces.h"
+
+PTE *Hash, *Hash_end;
+unsigned long Hash_size, Hash_mask;
+unsigned long _SDR1;
+
+union ubat {			/* BAT register values to be loaded */
+	BAT	bat;
+#ifdef CONFIG_PPC64BRIDGE
+	u64	word[2];
+#else
+	u32	word[2];
+#endif
+} BATS[4][2];			/* 4 pairs of IBAT, DBAT */
+
+struct batrange {		/* stores address ranges mapped by BATs */
+	unsigned long start;
+	unsigned long limit;
+	unsigned long phys;
+} bat_addrs[4];
+
+/*
+ * Return PA for this VA if it is mapped by a BAT, or 0
+ */
+unsigned long v_mapped_by_bats(unsigned long va)
+{
+	int b;
+	for (b = 0; b < 4; ++b)
+		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
+			return bat_addrs[b].phys + (va - bat_addrs[b].start);
+	return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_mapped_by_bats(unsigned long pa)
+{
+	int b;
+	for (b = 0; b < 4; ++b)
+		if (pa >= bat_addrs[b].phys
+		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
+			    + bat_addrs[b].phys)
+			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
+	return 0;
+}
+
+unsigned long __init mmu_mapin_ram(void)
+{
+#ifdef CONFIG_POWER4
+	return 0;
+#else
+	unsigned long tot, bl, done;
+	unsigned long max_size = (256<<20);
+	unsigned long align;
+
+	if (__map_without_bats)
+		return 0;
+
+	/* Set up BAT2 and if necessary BAT3 to cover RAM. */
+
+	/* Make sure we don't map a block larger than the
+	   smallest alignment of the physical address. */
+	/* alignment of PPC_MEMSTART */
+	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
+	/* set BAT block size to MIN(max_size, align) */
+	if (align && align < max_size)
+		max_size = align;
+
+	tot = total_lowmem;
+	for (bl = 128<<10; bl < max_size; bl <<= 1) {
+		if (bl * 2 > tot)
+			break;
+	}
+
+	setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
+	done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+	if ((done < tot) && !bat_addrs[3].limit) {
+		/* use BAT3 to cover a bit more */
+		tot -= done;
+		for (bl = 128<<10; bl < max_size; bl <<= 1)
+			if (bl * 2 > tot)
+				break;
+		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
+		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+	}
+
+	return done;
+#endif
+}
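+
+/*
+ * Worked example (illustrative): with 48MB of lowmem and PPC_MEMSTART = 0,
+ * the sizing loop above settles on bl = 32MB for BAT2 (64MB would
+ * overshoot), and the BAT3 pass then covers the remaining 16MB, so all of
+ * lowmem ends up BAT-mapped.
+ */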
+
+/*
+ * Set up one of the I/D BAT (block address translation) register pairs.
+ * The parameters are not checked; in particular size must be a power
+ * of 2 between 128k and 256M.
+ */
+void __init setbat(int index, unsigned long virt, unsigned long phys,
+		   unsigned int size, int flags)
+{
+	unsigned int bl;
+	int wimgxpp;
+	union ubat *bat = BATS[index];
+
+	if (((flags & _PAGE_NO_CACHE) == 0) &&
+	    cpu_has_feature(CPU_FTR_NEED_COHERENT))
+		flags |= _PAGE_COHERENT;
+
+	bl = (size >> 17) - 1;
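+	/*
+	 * (Illustrative note): BL encodes the block length as a mask of 128k
+	 * units minus one, e.g. size = 8M gives bl = 0x3f and size = 256M
+	 * gives bl = 0x7ff.
+	 */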
+	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
+		/* 603, 604, etc. */
+		/* Do DBAT first */
+		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+				   | _PAGE_COHERENT | _PAGE_GUARDED);
+		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+		bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+		bat[1].word[1] = phys | wimgxpp;
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
+		if (flags & _PAGE_USER)
+#endif
+			bat[1].bat.batu.vp = 1;
+		if (flags & _PAGE_GUARDED) {
+			/* G bit must be zero in IBATs */
+			bat[0].word[0] = bat[0].word[1] = 0;
+		} else {
+			/* make IBAT same as DBAT */
+			bat[0] = bat[1];
+		}
+	} else {
+		/* 601 cpu */
+		if (bl > BL_8M)
+			bl = BL_8M;
+		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+				   | _PAGE_COHERENT);
+		wimgxpp |= (flags & _PAGE_RW)?
+			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
+		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
+		bat->word[1] = phys | bl | 0x40;	/* V=1 */
+	}
+
+	bat_addrs[index].start = virt;
+	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
+	bat_addrs[index].phys = phys;
+}
+
+/*
+ * Initialize the hash table and patch the instructions in hashtable.S.
+ */
+void __init MMU_init_hw(void)
+{
+	unsigned int hmask, mb, mb2;
+	unsigned int n_hpteg, lg_n_hpteg;
+
+	extern unsigned int hash_page_patch_A[];
+	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
+	extern unsigned int hash_page[];
+	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
+
+	if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+		/*
+		 * Put a blr (procedure return) instruction at the
+		 * start of hash_page, since we can still get DSI
+		 * exceptions on a 603.
+		 */
+		hash_page[0] = 0x4e800020;
+		flush_icache_range((unsigned long) &hash_page[0],
+				   (unsigned long) &hash_page[1]);
+		return;
+	}
+
+	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
+
+#ifdef CONFIG_PPC64BRIDGE
+#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
+#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
+#define MIN_N_HPTEG	2048		/* min 256kB hash table */
+#else
+#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
+#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
+#define MIN_N_HPTEG	1024		/* min 64kB hash table */
+#endif
+
+#ifdef CONFIG_POWER4
+	/* The hash table has already been allocated and initialized
+	   in prom.c */
+	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
+	lg_n_hpteg = __ilog2(n_hpteg);
+
+	/* Remove the hash table from the available memory */
+	if (Hash)
+		reserve_phys_mem(__pa(Hash), Hash_size);
+
+#else /* CONFIG_POWER4 */
+	/*
+	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
+	 * This is less than the recommended amount, but then
+	 * Linux ain't AIX.
+	 */
+	n_hpteg = total_memory / (PAGE_SIZE * 8);
+	if (n_hpteg < MIN_N_HPTEG)
+		n_hpteg = MIN_N_HPTEG;
+	lg_n_hpteg = __ilog2(n_hpteg);
+	if (n_hpteg & (n_hpteg - 1)) {
+		++lg_n_hpteg;		/* round up if not power of 2 */
+		n_hpteg = 1 << lg_n_hpteg;
+	}
+	Hash_size = n_hpteg << LG_HPTEG_SIZE;
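+
+	/*
+	 * Worked example (illustrative): 256MB of RAM gives
+	 * n_hpteg = 256MB / 32k = 8192, already a power of 2, so with
+	 * 64-byte HPTEGs Hash_size = 8192 << 6 = 512kB.
+	 */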
+
+	/*
+	 * Find some memory for the hash table.
+	 */
+	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
+	Hash = mem_pieces_find(Hash_size, Hash_size);
+	cacheable_memzero(Hash, Hash_size);
+	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
+#endif /* CONFIG_POWER4 */
+
+	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+
+	printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
+	       total_memory >> 20, Hash_size >> 10, Hash);
+
+
+	/*
+	 * Patch up the instructions in hashtable.S:create_hpte
+	 */
+	if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
+	Hash_mask = n_hpteg - 1;
+	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
+	if (lg_n_hpteg > 16)
+		mb2 = 16 - LG_HPTEG_SIZE;
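+
+	/*
+	 * (Illustrative, continuing the 512kB example): lg_n_hpteg = 13, so
+	 * mb = mb2 = 32 - 6 - 13 = 13; these are patched into the mask-begin
+	 * fields (the 0x7c0 bits) of the rlwinm instructions below.
+	 */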
+
+	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
+		| ((unsigned int)(Hash) >> 16);
+	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
+	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
+	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
+	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
+
+	/*
+	 * Ensure that the locations we've patched have been written
+	 * out from the data cache and invalidated in the instruction
+	 * cache, on those machines with split caches.
+	 */
+	flush_icache_range((unsigned long) &hash_page_patch_A[0],
+			   (unsigned long) &hash_page_patch_C[1]);
+
+	/*
+	 * Patch up the instructions in hashtable.S:flush_hash_page
+	 */
+	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
+		| ((unsigned int)(Hash) >> 16);
+	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
+	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
+	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
+	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
+			   (unsigned long) &flush_hash_patch_B[1]);
+
+	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
+}
diff --git a/arch/powerpc/mm/tlb.c b/arch/powerpc/mm/tlb.c
new file mode 100644
index 0000000..6c3dc3c
--- /dev/null
+++ b/arch/powerpc/mm/tlb.c
@@ -0,0 +1,183 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU uses a hash table to store virtual to
+ * physical translations, these routines flush entries from the
+ * hash table also.
+ *  -- paulus
+ *
+ *  Derived from arch/ppc/mm/init.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Called when unmapping pages to flush entries from the TLB/hash table.
+ */
+void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+	unsigned long ptephys;
+
+	if (Hash != 0) {
+		ptephys = __pa(ptep) & PAGE_MASK;
+		flush_hash_pages(mm->context, addr, ptephys, 1);
+	}
+}
+
+/*
+ * Called by ptep_set_access_flags; must flush on CPUs for which the
+ * DSI handler can't just "fixup" the TLB on a write fault.
+ */
+void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (Hash != 0)
+		return;
+	_tlbie(addr);
+}
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+void tlb_flush(struct mmu_gather *tlb)
+{
+	if (Hash == 0) {
+		/*
+		 * 603 needs to flush the whole TLB here since
+		 * it doesn't use a hash table.
+		 */
+		_tlbia();
+	}
+}
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * Since the hardware hash table functions as an extension of the
+ * TLB as far as the Linux page tables are concerned, we flush it too.
+ *    -- Cort
+ */
+
+/*
+ * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
+ * the cache operations on the bus.  Hence we need to use an IPI
+ * to get the other CPU(s) to invalidate their TLBs.
+ */
+#ifdef CONFIG_SMP_750
+#define FINISH_FLUSH	smp_send_tlb_invalidate(0)
+#else
+#define FINISH_FLUSH	do { } while (0)
+#endif
+
+static void flush_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long pmd_end;
+	int count;
+	unsigned int ctx = mm->context;
+
+	if (Hash == 0) {
+		_tlbia();
+		return;
+	}
+	start &= PAGE_MASK;
+	if (start >= end)
+		return;
+	end = (end - 1) | ~PAGE_MASK;
+	pmd = pmd_offset(pgd_offset(mm, start), start);
+	for (;;) {
+		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
+		if (pmd_end > end)
+			pmd_end = end;
+		if (!pmd_none(*pmd)) {
+			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
+			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
+		}
+		if (pmd_end == end)
+			break;
+		start = pmd_end + 1;
+		++pmd;
+	}
+}
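+
+/*
+ * Worked example (illustrative, assuming 4k pages): flush_range() over
+ * 0x10000000..0x10003000 computes end = 0x10002fff; all three pages fall
+ * under a single pmd, so flush_hash_pages() is called once with count = 3.
+ */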
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	flush_range(&init_mm, start, end);
+	FINISH_FLUSH;
+}
+
+/*
+ * Flush all the (user) entries for the address space described by mm.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *mp;
+
+	if (Hash == 0) {
+		_tlbia();
+		return;
+	}
+
+	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+	FINISH_FLUSH;
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	if (Hash == 0) {
+		_tlbie(vmaddr);
+		return;
+	}
+	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
+	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
+	if (!pmd_none(*pmd))
+		flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+	FINISH_FLUSH;
+}
+
+/*
+ * For each address in the range, find the pte for the address
+ * and check _PAGE_HASHPTE bit; if it is set, find and destroy
+ * the corresponding HPTE.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	flush_range(vma->vm_mm, start, end);
+	FINISH_FLUSH;
+}
diff --git a/arch/ppc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
similarity index 100%
rename from arch/ppc/oprofile/Kconfig
rename to arch/powerpc/oprofile/Kconfig
diff --git a/arch/ppc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
similarity index 69%
rename from arch/ppc/oprofile/Makefile
rename to arch/powerpc/oprofile/Makefile
index e2218d3..0782d0c 100644
--- a/arch/ppc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -7,8 +7,5 @@
 		timer_int.o )
 
 oprofile-y := $(DRIVER_OBJS) common.o
-
-ifeq ($(CONFIG_FSL_BOOKE),y)
-	oprofile-y += op_model_fsl_booke.o
-endif
-
+oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
+oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
diff --git a/arch/ppc64/oprofile/common.c b/arch/powerpc/oprofile/common.c
similarity index 60%
rename from arch/ppc64/oprofile/common.c
rename to arch/powerpc/oprofile/common.c
index e5f5727..0ec12c8 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -1,5 +1,9 @@
 /*
+ * PPC 64 oprofile support:
  * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ * PPC 32 oprofile support: (based on PPC 64 support)
+ * Copyright (C) Freescale Semiconductor, Inc 2004
+ *	Author: Andy Fleming
  *
  * Based on alpha version.
  *
@@ -10,26 +14,37 @@
  */
 
 #include <linux/oprofile.h>
+#ifndef __powerpc64__
+#include <linux/slab.h>
+#endif /* ! __powerpc64__ */
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/errno.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#ifdef __powerpc64__
 #include <asm/pmc.h>
+#else /* __powerpc64__ */
+#include <asm/perfmon.h>
+#endif /* __powerpc64__ */
 #include <asm/cputable.h>
 #include <asm/oprofile_impl.h>
 
-static struct op_ppc64_model *model;
+static struct op_powerpc_model *model;
 
 static struct op_counter_config ctr[OP_MAX_COUNTER];
 static struct op_system_config sys;
 
+#ifndef __powerpc64__
+static char *cpu_type;
+#endif /* ! __powerpc64__ */
+
 static void op_handle_interrupt(struct pt_regs *regs)
 {
 	model->handle_interrupt(regs, ctr);
 }
 
-static int op_ppc64_setup(void)
+static int op_powerpc_setup(void)
 {
 	int err;
 
@@ -42,41 +57,49 @@
 	model->reg_setup(ctr, &sys, model->num_counters);
 
 	/* Configure the registers on all cpus.  */
+#ifdef __powerpc64__
 	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+#else /* __powerpc64__ */
+#if 0
+	/* FIXME: Make multi-cpu work */
+	on_each_cpu(model->reg_setup, NULL, 0, 1);
+#endif
+#endif /* __powerpc64__ */
 
 	return 0;
 }
 
-static void op_ppc64_shutdown(void)
+static void op_powerpc_shutdown(void)
 {
 	release_pmc_hardware();
 }
 
-static void op_ppc64_cpu_start(void *dummy)
+static void op_powerpc_cpu_start(void *dummy)
 {
 	model->start(ctr);
 }
 
-static int op_ppc64_start(void)
+static int op_powerpc_start(void)
 {
-	on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1);
+	on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
 	return 0;
 }
 
-static inline void op_ppc64_cpu_stop(void *dummy)
+static inline void op_powerpc_cpu_stop(void *dummy)
 {
 	model->stop();
 }
 
-static void op_ppc64_stop(void)
+static void op_powerpc_stop(void)
 {
-	on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1);
+	on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
 }
 
-static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 {
 	int i;
 
+#ifdef __powerpc64__
 	/*
 	 * There is one mmcr0, mmcr1 and mmcra for setting the events for
 	 * all of the counters.
@@ -84,6 +107,7 @@
 	oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
 	oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
 	oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#endif /* __powerpc64__ */
 
 	for (i = 0; i < model->num_counters; ++i) {
 		struct dentry *dir;
@@ -95,44 +119,70 @@
 		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
 		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
 		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+#ifdef __powerpc64__
 		/*
 		 * We dont support per counter user/kernel selection, but
 		 * we leave the entries because userspace expects them
 		 */
+#endif /* __powerpc64__ */
 		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
 		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+
+#ifndef __powerpc64__
+		/* FIXME: Not sure if this is used */
+#endif /* ! __powerpc64__ */
 		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
 	}
 
 	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
 	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+#ifdef __powerpc64__
 	oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
 				&sys.backtrace_spinlocks);
+#endif /* __powerpc64__ */
 
 	/* Default to tracing both kernel and user */
 	sys.enable_kernel = 1;
 	sys.enable_user = 1;
-
+#ifdef __powerpc64__
 	/* Turn on backtracing through spinlocks by default */
 	sys.backtrace_spinlocks = 1;
+#endif /* __powerpc64__ */
 
 	return 0;
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
+#ifndef __powerpc64__
+#ifdef CONFIG_FSL_BOOKE
+	model = &op_model_fsl_booke;
+#else
+	return -ENODEV;
+#endif
+
+	cpu_type = kmalloc(32, GFP_KERNEL);
+	if (cpu_type == NULL)
+		return -ENOMEM;
+
+	snprintf(cpu_type, 32, "ppc/%s", cur_cpu_spec->cpu_name);
+
+	model->num_counters = cur_cpu_spec->num_pmcs;
+
+	ops->cpu_type = cpu_type;
+#else /* __powerpc64__ */
 	if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
 		return -ENODEV;
-
 	model = cur_cpu_spec->oprofile_model;
 	model->num_counters = cur_cpu_spec->num_pmcs;
 
 	ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
-	ops->create_files = op_ppc64_create_files;
-	ops->setup = op_ppc64_setup;
-	ops->shutdown = op_ppc64_shutdown;
-	ops->start = op_ppc64_start;
-	ops->stop = op_ppc64_stop;
+#endif /* __powerpc64__ */
+	ops->create_files = op_powerpc_create_files;
+	ops->setup = op_powerpc_setup;
+	ops->shutdown = op_powerpc_shutdown;
+	ops->start = op_powerpc_start;
+	ops->stop = op_powerpc_stop;
 
 	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
 	       ops->cpu_type);
@@ -142,4 +192,8 @@
 
 void oprofile_arch_exit(void)
 {
+#ifndef __powerpc64__
+	kfree(cpu_type);
+	cpu_type = NULL;
+#endif /* ! __powerpc64__ */
 }
diff --git a/arch/ppc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c
similarity index 97%
rename from arch/ppc/oprofile/op_model_fsl_booke.c
rename to arch/powerpc/oprofile/op_model_fsl_booke.c
index fc9c859..1917f8d 100644
--- a/arch/ppc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_booke.c
@@ -25,8 +25,7 @@
 #include <asm/reg_booke.h>
 #include <asm/page.h>
 #include <asm/perfmon.h>
-
-#include "op_impl.h"
+#include <asm/oprofile_impl.h>
 
 static unsigned long reset_value[OP_MAX_COUNTER];
 
@@ -176,7 +175,7 @@
 	pmc_start_ctrs(1);
 }
 
-struct op_ppc32_model op_model_fsl_booke = {
+struct op_powerpc_model op_model_fsl_booke = {
 	.reg_setup		= fsl_booke_reg_setup,
 	.start			= fsl_booke_start,
 	.stop			= fsl_booke_stop,
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
similarity index 99%
rename from arch/ppc64/oprofile/op_model_power4.c
rename to arch/powerpc/oprofile/op_model_power4.c
index 32b2bb5..8864493 100644
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -300,7 +300,7 @@
 	mtspr(SPRN_MMCR0, mmcr0);
 }
 
-struct op_ppc64_model op_model_power4 = {
+struct op_powerpc_model op_model_power4 = {
 	.reg_setup		= power4_reg_setup,
 	.cpu_setup		= power4_cpu_setup,
 	.start			= power4_start,
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
similarity index 98%
rename from arch/ppc64/oprofile/op_model_rs64.c
rename to arch/powerpc/oprofile/op_model_rs64.c
index 08c5b33..e010b85 100644
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -209,7 +209,7 @@
 	mtspr(SPRN_MMCR0, mmcr0);
 }
 
-struct op_ppc64_model op_model_rs64 = {
+struct op_powerpc_model op_model_rs64 = {
 	.reg_setup		= rs64_reg_setup,
 	.cpu_setup		= rs64_cpu_setup,
 	.start			= rs64_start,
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
new file mode 100644
index 0000000..ed39d6a
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -0,0 +1,280 @@
+config 4xx
+	bool
+	depends on 40x || 44x
+	default y
+
+config WANT_EARLY_SERIAL
+	bool
+	select SERIAL_8250
+	default n
+
+menu "AMCC 4xx options"
+	depends on 4xx
+
+choice
+	prompt "Machine Type"
+	depends on 40x
+	default WALNUT
+
+config BUBINGA
+	bool "Bubinga"
+	select WANT_EARLY_SERIAL
+	help
+	  This option enables support for the IBM 405EP evaluation board.
+
+config CPCI405
+	bool "CPCI405"
+	help
+	  This option enables support for the CPCI405 board.
+
+config EP405
+	bool "EP405/EP405PC"
+	help
+	  This option enables support for the EP405/EP405PC boards.
+
+config REDWOOD_5
+	bool "Redwood-5"
+	help
+	  This option enables support for the IBM STB04 evaluation board.
+
+config REDWOOD_6
+	bool "Redwood-6"
+	help
+	  This option enables support for the IBM STBx25xx evaluation board.
+
+config SYCAMORE
+	bool "Sycamore"
+	help
+	  This option enables support for the IBM PPC405GPr evaluation board.
+
+config WALNUT
+	bool "Walnut"
+	help
+	  This option enables support for the IBM PPC405GP evaluation board.
+
+config XILINX_ML300
+	bool "Xilinx-ML300"
+	help
+	  This option enables support for the Xilinx ML300 evaluation board.
+
+endchoice
+
+choice
+	prompt "Machine Type"
+	depends on 44x
+	default EBONY
+
+config BAMBOO
+	bool "Bamboo"
+	select WANT_EARLY_SERIAL
+	help
+	  This option enables support for the IBM PPC440EP evaluation board.
+
+config EBONY
+	bool "Ebony"
+	select WANT_EARLY_SERIAL
+	help
+	  This option enables support for the IBM PPC440GP evaluation board.
+
+config LUAN
+	bool "Luan"
+	select WANT_EARLY_SERIAL
+	help
+	  This option enables support for the IBM PPC440SP evaluation board.
+
+config OCOTEA
+	bool "Ocotea"
+	select WANT_EARLY_SERIAL
+	help
+	  This option enables support for the IBM PPC440GX evaluation board.
+
+endchoice
+
+config EP405PC
+	bool "EP405PC Support"
+	depends on EP405
+
+
+# It's often necessary to know the specific 4xx processor type.
+# Fortunately, it is implied (so far) from the board type, so we
+# don't need to ask more redundant questions.
+config NP405H
+	bool
+	depends on ASH
+	default y
+
+config 440EP
+	bool
+	depends on BAMBOO
+	select PPC_FPU
+	default y
+
+config 440GP
+	bool
+	depends on EBONY
+	default y
+
+config 440GX
+	bool
+	depends on OCOTEA
+	default y
+
+config 440SP
+	bool
+	depends on LUAN
+	default y
+
+config 440
+	bool
+	depends on 440GP || 440SP || 440EP
+	default y
+
+config 440A
+	bool
+	depends on 440GX
+	default y
+
+config IBM440EP_ERR42
+	bool
+	depends on 440EP
+	default y
+
+# All 405-based cores up until the 405GPR and 405EP have this errata.
+config IBM405_ERR77
+	bool
+	depends on 40x && !403GCX && !405GPR && !405EP
+	default y
+
+# All 40x-based cores, up until the 405GPR and 405EP have this errata.
+config IBM405_ERR51
+	bool
+	depends on 40x && !405GPR && !405EP
+	default y
+
+config BOOKE
+	bool
+	depends on 44x
+	default y
+
+config IBM_OCP
+	bool
+	depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+	default y
+
+config XILINX_OCP
+	bool
+	depends on XILINX_ML300
+	default y
+
+config IBM_EMAC4
+	bool
+	depends on 440GX || 440SP
+	default y
+
+config BIOS_FIXUP
+	bool
+	depends on BUBINGA || EP405 || SYCAMORE || WALNUT
+	default y
+
+# OAK doesn't exist, but this is kept around for any future 403GCX boards.
+config 403GCX
+	bool
+	depends on OAK
+	default y
+
+config 405EP
+	bool
+	depends on BUBINGA
+	default y
+
+config 405GP
+	bool
+	depends on CPCI405 || EP405 || WALNUT
+	default y
+
+config 405GPR
+	bool
+	depends on SYCAMORE
+	default y
+
+config VIRTEX_II_PRO
+	bool
+	depends on XILINX_ML300
+	default y
+
+config STB03xxx
+	bool
+	depends on REDWOOD_5 || REDWOOD_6
+	default y
+
+config EMBEDDEDBOOT
+	bool
+	depends on EP405 || XILINX_ML300
+	default y
+
+config IBM_OPENBIOS
+	bool
+	depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+	default y
+
+config PPC4xx_DMA
+	bool "PPC4xx DMA controller support"
+	depends on 4xx
+
+config PPC4xx_EDMA
+	bool
+	depends on !STB03xxx && PPC4xx_DMA
+	default y
+
+config PPC_GEN550
+	bool
+	depends on 4xx
+	default y
+
+choice
+	prompt "TTYS0 device and default console"
+	depends on 40x
+	default UART0_TTYS0
+
+config UART0_TTYS0
+	bool "UART0"
+
+config UART0_TTYS1
+	bool "UART1"
+
+endchoice
+
+endmenu
+
+
+menu "IBM 40x options"
+	depends on 40x
+
+config SERIAL_SICC
+	bool "SICC Serial port"
+	depends on STB03xxx
+
+config UART1_DFLT_CONSOLE
+	bool
+	depends on SERIAL_SICC && UART0_TTYS1
+	default y
+
+config SERIAL_SICC_CONSOLE
+	bool
+	depends on SERIAL_SICC && UART0_TTYS1
+	default y
+
+endmenu
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
new file mode 100644
index 0000000..c5bc282
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -0,0 +1,86 @@
+config 85xx
+	bool
+	depends on E500
+	default y
+
+config PPC_INDIRECT_PCI_BE
+	bool
+	depends on 85xx
+	default y
+
+menu "Freescale 85xx options"
+	depends on E500
+
+choice
+	prompt "Machine Type"
+	depends on 85xx
+	default MPC8540_ADS
+
+config MPC8540_ADS
+	bool "Freescale MPC8540 ADS"
+	help
+	  This option enables support for the MPC 8540 ADS evaluation board.
+
+config MPC8548_CDS
+	bool "Freescale MPC8548 CDS"
+	help
+	  This option enables support for the MPC8548 CDS evaluation board.
+
+config MPC8555_CDS
+	bool "Freescale MPC8555 CDS"
+	help
+	  This option enables support for the MPC8555 CDS evaluation board.
+
+config MPC8560_ADS
+	bool "Freescale MPC8560 ADS"
+	help
+	  This option enables support for the MPC 8560 ADS evaluation board.
+
+config SBC8560
+	bool "WindRiver PowerQUICC III SBC8560"
+	help
+	  This option enables support for the WindRiver PowerQUICC III 
+	  SBC8560 board.
+
+config STX_GP3
+	bool "Silicon Turnkey Express GP3"
+	help
+	  This option enables support for the Silicon Turnkey Express GP3
+	  board.
+
+endchoice
+
+# It's often necessary to know the specific 85xx processor type.
+# Fortunately, it is implied (so far) from the board type, so we
+# don't need to ask more redundant questions.
+config MPC8540
+	bool
+	depends on MPC8540_ADS
+	default y
+
+config MPC8548
+	bool
+	depends on MPC8548_CDS
+	default y
+
+config MPC8555
+	bool
+	depends on MPC8555_CDS
+	default y
+
+config MPC8560
+	bool
+	depends on SBC8560 || MPC8560_ADS || STX_GP3
+	default y
+
+config 85xx_PCI2
+	bool "Supprt for 2nd PCI host controller"
+	depends on MPC8555_CDS
+	default y
+
+config PPC_GEN550
+	bool
+	depends on MPC8540 || SBC8560 || MPC8555
+	default y
+
+endmenu
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
new file mode 100644
index 0000000..c8c0ba3
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -0,0 +1,352 @@
+config FADS
+	bool
+
+choice
+	prompt "8xx Machine Type"
+	depends on 8xx
+	default RPXLITE
+
+config RPXLITE
+	bool "RPX-Lite"
+	---help---
+	  Single-board computers based around the PowerPC MPC8xx chips and
+	  intended for embedded applications.  The following types are
+	  supported:
+
+	  RPX-Lite:
+	  Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
+
+	  RPX-Classic:
+	  Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
+	  the MPC 860
+
+	  BSE-IP:
+	  Bright Star Engineering ip-Engine.
+
+	  TQM823L:
+	  TQM850L:
+	  TQM855L:
+	  TQM860L:
+	  MPC8xx based family of mini modules, half credit card size,
+	  up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
+	  2 x CAN bus interface, ...
+	  Manufacturer: TQ Components, www.tq-group.de
+	  Date of Release: October (?) 1999
+	  End of Life: not yet :-)
+	  URL:
+	  - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
+	  - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
+	  - images: <http://www.denx.de/embedded-ppc-en.html>
+
+	  FPS850L:
+	  FingerPrint Sensor System (based on TQM850L)
+	  Manufacturer: IKENDI AG, <http://www.ikendi.com/>
+	  Date of Release: November 1999
+	  End of life: end 2000 ?
+	  URL: see TQM850L
+
+	  IVMS8:
+	  MPC860 based board used in the "Integrated Voice Mail System",
+	  Small Version (8 voice channels)
+	  Manufacturer: Speech Design, <http://www.speech-design.de/>
+	  Date of Release: December 2000 (?)
+	  End of life: -
+	  URL: <http://www.speech-design.de/>
+
+	  IVML24:
+	  MPC860 based board used in the "Integrated Voice Mail System",
+	  Large Version (24 voice channels)
+	  Manufacturer: Speech Design, <http://www.speech-design.de/>
+	  Date of Release: March 2001  (?)
+	  End of life: -
+	  URL: <http://www.speech-design.de/>
+
+	  HERMES:
+	  Hermes-Pro ISDN/LAN router with integrated 8 x hub
+	  Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
+	  <http://www.multidata.de/>
+	  Date of Release: 2000 (?)
+	  End of life: -
+	  URL: <http://www.multidata.de/english/products/hpro.htm>
+
+	  IP860:
+	  VMEBus IP (Industry Pack) carrier board with MPC860
+	  Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
+	  Date of Release: ?
+	  End of life: -
+	  URL: <http://www.microsys.de/html/ip860.html>
+
+	  PCU_E:
+	  PCU = Peripheral Controller Unit, Extended
+	  Manufacturer: Siemens AG, ICN (Information and Communication Networks)
+	  	<http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
+	  Date of Release: April 2001
+	  End of life: August 2001
+	  URL: n. a.
+
+config RPXCLASSIC
+	bool "RPX-Classic"
+	help
+	  The RPX-Classic is a single-board computer based on the Motorola
+	  MPC860.  It features 16MB of DRAM and a variable amount of flash,
+	  I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
+	  LEDs.  Variants with Ethernet ports exist.  Say Y here to support it
+	  directly.
+
+config BSEIP
+	bool "BSE-IP"
+	help
+	  Say Y here to support the Bright Star Engineering ipEngine SBC.
+	  This is a credit-card-sized device featuring a MPC823 processor,
+	  26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
+	  controller, and two RS232 ports.
+
+config MPC8XXFADS
+	bool "FADS"
+	select FADS
+
+config MPC86XADS
+	bool "MPC86XADS"
+	select FADS
+	help
+	  MPC86x Application Development System by Freescale Semiconductor.
+	  The MPC86xADS is meant to serve as a platform for s/w and h/w
+	  development around the MPC86X processor families.
+
+config MPC885ADS
+	bool "MPC885ADS"
+	help
+	  Freescale Semiconductor MPC885 Application Development System (ADS).
+	  Also known as DUET.
+	  The MPC885ADS is meant to serve as a platform for s/w and h/w
+	  development around the MPC885 processor family.
+
+config TQM823L
+	bool "TQM823L"
+	help
+	  Say Y here to support the TQM823L, one of an MPC8xx-based family of
+	  mini SBCs (half credit-card size) from TQ Components first released
+	  in late 1999.  Technical references are at
+	  <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+	  <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+	  <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM850L
+	bool "TQM850L"
+	help
+	  Say Y here to support the TQM850L, one of an MPC8xx-based family of
+	  mini SBCs (half credit-card size) from TQ Components first released
+	  in late 1999.  Technical references are at
+	  <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+	  <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+	  <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM855L
+	bool "TQM855L"
+	help
+	  Say Y here to support the TQM855L, one of an MPC8xx-based family of
+	  mini SBCs (half credit-card size) from TQ Components first released
+	  in late 1999.  Technical references are at
+	  <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+	  <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+	  <http://www.denx.de/embedded-ppc-en.html>.
+
+config TQM860L
+	bool "TQM860L"
+	help
+	  Say Y here to support the TQM860L, one of an MPC8xx-based family of
+	  mini SBCs (half credit-card size) from TQ Components first released
+	  in late 1999.  Technical references are at
+	  <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
+	  <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
+	  <http://www.denx.de/embedded-ppc-en.html>.
+
+config FPS850L
+	bool "FPS850L"
+
+config IVMS8
+	bool "IVMS8"
+	help
+	  Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
+	  from Speech Design, released March 2001.  The manufacturer's website
+	  is at <http://www.speech-design.de/>.
+
+config IVML24
+	bool "IVML24"
+	help
+	  Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
+	  from Speech Design, released March 2001.  The manufacturer's website
+	  is at <http://www.speech-design.de/>.
+
+config HERMES_PRO
+	bool "HERMES"
+
+config IP860
+	bool "IP860"
+
+config LWMON
+	bool "LWMON"
+
+config PCU_E
+	bool "PCU_E"
+
+config CCM
+	bool "CCM"
+
+config LANTEC
+	bool "LANTEC"
+
+config MBX
+	bool "MBX"
+	help
+	  MBX is a line of Motorola single-board computers based around the
+	  MPC821 and MPC860 processors, and intended for embedded-controller
+	  applications.  Say Y here to support these boards directly.
+
+config WINCEPT
+	bool "WinCept"
+	help
+	  The Wincept 100/110 is a Motorola single-board computer based on the
+	  MPC821 PowerPC, introduced in 1998 and designed to be used in
+	  thin-client machines.  Say Y to support it directly.
+
+endchoice
+
+#
+# MPC8xx Communication options
+#
+
+menu "MPC8xx CPM Options"
+	depends on 8xx
+
+config SCC_ENET
+	bool "CPM SCC Ethernet"
+	depends on NET_ETHERNET
+	help
+	  Enable Ethernet support via the Motorola MPC8xx serial
+	  communications controller.
+
+choice
+	prompt "SCC used for Ethernet"
+	depends on SCC_ENET
+	default SCC1_ENET
+
+config SCC1_ENET
+	bool "SCC1"
+	help
+	  Use MPC8xx serial communications controller 1 to drive Ethernet
+	  (default).
+
+config SCC2_ENET
+	bool "SCC2"
+	help
+	  Use MPC8xx serial communications controller 2 to drive Ethernet.
+
+config SCC3_ENET
+	bool "SCC3"
+	help
+	  Use MPC8xx serial communications controller 3 to drive Ethernet.
+
+endchoice
+
+config FEC_ENET
+	bool "860T FEC Ethernet"
+	depends on NET_ETHERNET
+	help
+	  Enable Ethernet support via the Fast Ethernet Controller (FEC)
+	  on the Motorola MPC860T.
+
+config USE_MDIO
+	bool "Use MDIO for PHY configuration"
+	depends on FEC_ENET
+	help
+	  On some boards the hardware configuration of the ethernet PHY can be
+	  used without any software interaction over the MDIO interface, so
+	  all MII code can be omitted. Say N here if unsure or if you don't
+	  need link status reports.
+
+config FEC_AM79C874
+	bool "Support AMD79C874 PHY"
+	depends on USE_MDIO
+
+config FEC_LXT970
+	bool "Support LXT970 PHY"
+	depends on USE_MDIO
+
+config FEC_LXT971
+	bool "Support LXT971 PHY"
+	depends on USE_MDIO
+	
+config FEC_QS6612
+	bool "Support QS6612 PHY"
+	depends on USE_MDIO
+	
+config ENET_BIG_BUFFERS
+	bool "Use Big CPM Ethernet Buffers"
+	depends on SCC_ENET || FEC_ENET
+	help
+	  Allocate large buffers for MPC8xx Ethernet. Increases throughput
+	  and decreases the likelihood of dropped packets, but costs memory.
+
+config HTDMSOUND
+	bool "Embedded Planet HIOX Audio"
+	depends on SOUND=y
+
+# This doesn't really belong here, but it is convenient to ask
+# 8xx specific questions.
+comment "Generic MPC8xx Options"
+
+config 8xx_COPYBACK
+	bool "Copy-Back Data Cache (else Writethrough)"
+	help
+	  Saying Y here will cause the cache on an MPC8xx processor to be used
+	  in Copy-Back mode.  If you say N here, it is used in Writethrough
+	  mode.
+
+	  If in doubt, say Y here.
+
+config 8xx_CPU6
+	bool "CPU6 Silicon Errata (860 Pre Rev. C)"
+	help
+	  MPC860 CPUs, prior to Rev C have some bugs in the silicon, which
+	  require workarounds for Linux (and most other OSes to work).  If you
+	  get a BUG() very early in boot, this might fix the problem.  For
+	  more details read the document entitled "MPC860 Family Device Errata
+	  Reference" on Motorola's website.  This option also incurs a
+	  performance hit.
+
+	  If in doubt, say N here.
+
+choice
+	prompt "Microcode patch selection"
+	default NO_UCODE_PATCH
+	help
+	  Help not implemented yet, coming soon.
+
+config NO_UCODE_PATCH
+	bool "None"
+
+config USB_SOF_UCODE_PATCH
+	bool "USB SOF patch"
+	help
+	  Help not implemented yet, coming soon.
+
+config I2C_SPI_UCODE_PATCH
+	bool "I2C/SPI relocation patch"
+	help
+	  Help not implemented yet, coming soon.
+
+config I2C_SPI_SMC1_UCODE_PATCH
+	bool "I2C/SPI/SMC1 relocation patch"
+	help
+	  Help not implemented yet, coming soon.
+
+endchoice
+
+config UCODE_PATCH
+	bool
+	default y
+	depends on !NO_UCODE_PATCH
+
+endmenu
+
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
new file mode 100644
index 0000000..7637ff3
--- /dev/null
+++ b/arch/powerpc/platforms/Makefile
@@ -0,0 +1,7 @@
+ifeq ($(CONFIG_PPC32),y)
+obj-$(CONFIG_PPC_PMAC)		+= powermac/
+endif
+obj-$(CONFIG_4xx)		+= 4xx/
+obj-$(CONFIG_83xx)		+= 83xx/
+obj-$(CONFIG_85xx)		+= 85xx/
+obj-$(CONFIG_PPC_ISERIES)	+= iseries/
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
new file mode 100644
index 0000000..6bde3bf
--- /dev/null
+++ b/arch/powerpc/platforms/apus/Kconfig
@@ -0,0 +1,130 @@
+
+config AMIGA
+	bool
+	depends on APUS
+	default y
+	help
+	  This option enables support for the Amiga series of computers.
+
+config ZORRO
+	bool
+	depends on APUS
+	default y
+	help
+	  This enables support for the Zorro bus in the Amiga. If you have
+	  expansion cards in your Amiga that conform to the Amiga
+	  AutoConfig(tm) specification, say Y, otherwise N. Note that even
+	  expansion cards that do not fit in the Zorro slots but fit in e.g.
+	  the CPU slot may fall in this category, so you have to say Y to let
+	  Linux use these.
+
+config ABSTRACT_CONSOLE
+	bool
+	depends on APUS
+	default y
+
+config APUS_FAST_EXCEPT
+	bool
+	depends on APUS
+	default y
+
+config AMIGA_PCMCIA
+	bool "Amiga 1200/600 PCMCIA support"
+	depends on APUS && EXPERIMENTAL
+	help
+	  Include support in the kernel for PCMCIA on the Amiga 1200 and
+	  Amiga 600.  If you intend to use PCMCIA cards, say Y; otherwise N.
+
+config AMIGA_BUILTIN_SERIAL
+	tristate "Amiga builtin serial support"
+	depends on APUS
+	help
+	  If you want to use your Amiga's built-in serial port in Linux,
+	  answer Y.
+
+	  To compile this driver as a module, choose M here.
+
+config GVPIOEXT
+	tristate "GVP IO-Extender support"
+	depends on APUS
+	help
+	  If you want to use a GVP IO-Extender serial card in Linux, say Y.
+	  Otherwise, say N.
+
+config GVPIOEXT_LP
+	tristate "GVP IO-Extender parallel printer support"
+	depends on GVPIOEXT
+	help
+	  Say Y to enable driving a printer from the parallel port on your
+	  GVP IO-Extender card, N otherwise.
+
+config GVPIOEXT_PLIP
+	tristate "GVP IO-Extender PLIP support"
+	depends on GVPIOEXT
+	help
+	  Say Y to enable doing IP over the parallel port on your GVP
+	  IO-Extender card, N otherwise.
+
+config MULTIFACE_III_TTY
+	tristate "Multiface Card III serial support"
+	depends on APUS
+	help
+	  If you want to use a Multiface III card's serial port in Linux,
+	  answer Y.
+
+	  To compile this driver as a module, choose M here.
+
+config A2232
+	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && APUS
+	---help---
+	  This option supports the 2232 7-port serial card shipped with the
+	  Amiga 2000 and other Zorro-bus machines, dating from 1989.  At
+	  a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
+	  each, plus an 8520 CIA, and a master 6502 CPU and buffer as well. The
+	  ports were connected with 8 pin DIN connectors on the card bracket,
+	  for which 8 pin to DB25 adapters were supplied. The card also had
+	  jumpers internally to toggle various pinning configurations.
+
+	  This driver can be built as a module; but then "generic_serial"
+	  will also be built as a module. This has to be loaded before
+	  "ser_a2232". If you want to do this, answer M here.
+
+config WHIPPET_SERIAL
+	tristate "Hisoft Whippet PCMCIA serial support"
+	depends on AMIGA_PCMCIA
+	help
+	  HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
+	  is no listing for the Whippet in their Amiga section.
+
+config APNE
+	tristate "PCMCIA NE2000 support"
+	depends on AMIGA_PCMCIA
+	help
+	  If you have a PCMCIA NE2000 compatible adapter, say Y.  Otherwise,
+	  say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called apne.
+
+config SERIAL_CONSOLE
+	bool "Support for serial port console"
+	depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
+
+config HEARTBEAT
+	bool "Use power LED as a heartbeat"
+	depends on APUS
+	help
+	  Use the power-on LED on your machine as a load meter.  The exact
+	  behavior is platform-dependent, but normally the flash frequency is
+	  a hyperbolic function of the 5-minute load average.
+
+config PROC_HARDWARE
+	bool "/proc/hardware support"
+	depends on APUS
+
+source "drivers/zorro/Kconfig"
+
+config PCI_PERMEDIA
+	bool "PCI for Permedia2"
+	depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
new file mode 100644
index 0000000..2d755b7
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -0,0 +1,305 @@
+choice
+	prompt "Machine Type"
+	depends on EMBEDDED6xx
+
+config KATANA
+	bool "Artesyn-Katana"
+	help
+	  Select KATANA if configuring an Artesyn KATANA 750i or 3750
+	  cPCI board.
+
+config WILLOW
+	bool "Cogent-Willow"
+
+config CPCI690
+	bool "Force-CPCI690"
+	help
+	  Select CPCI690 if configuring a Force CPCI690 cPCI board.
+
+config POWERPMC250
+	bool "Force-PowerPMC250"
+
+config CHESTNUT
+	bool "IBM 750FX Eval board or 750GX Eval board"
+	help
+	  Select CHESTNUT if configuring an IBM 750FX Eval Board or an
+	  IBM 750GX Eval board.
+
+config SPRUCE
+	bool "IBM-Spruce"
+
+config HDPU
+	bool "Sky-HDPU"
+	help
+	  Select HDPU if configuring a Sky Computers Compute Blade.
+
+config HDPU_FEATURES
+	depends on HDPU
+	tristate "HDPU-Features"
+	help
+	  Select to enable HDPU enhanced features.
+
+config EV64260
+	bool "Marvell-EV64260BP"
+	help
+	  Select EV64260 if configuring a Marvell (formerly Galileo)
+	  EV64260BP Evaluation platform.
+
+config LOPEC
+	bool "Motorola-LoPEC"
+
+config MVME5100
+	bool "Motorola-MVME5100"
+
+config PPLUS
+	bool "Motorola-PowerPlus"
+
+config PRPMC750
+	bool "Motorola-PrPMC750"
+
+config PRPMC800
+	bool "Motorola-PrPMC800"
+
+config SANDPOINT
+	bool "Motorola-Sandpoint"
+	help
+	  Select SANDPOINT if configuring for a Motorola Sandpoint X3
+	  (any flavor).
+
+config RADSTONE_PPC7D
+	bool "Radstone Technology PPC7D board"
+
+config PAL4
+	bool "SBS-Palomar4"
+
+config GEMINI
+	bool "Synergy-Gemini"
+	depends on BROKEN
+	help
+	  Select Gemini if configuring for a Synergy Microsystems Gemini
+	  series Single Board Computer.  More information is available at:
+	  <http://www.synergymicro.com/PressRel/97_10_15.html>.
+
+config EST8260
+	bool "EST8260"
+	---help---
+	  The EST8260 is a single-board computer manufactured by Wind River
+	  Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
+	  the MPC8260.  Wind River Systems has a website at
+	  <http://www.windriver.com/>, but the EST8260 cannot be found on it
+	  and has probably been discontinued or rebadged.
+
+config SBC82xx
+	bool "SBC82xx"
+	---help---
+	  SBC PowerQUICC II, single-board computer with MPC82xx CPU
+	  Manufacturer: Wind River Systems, Inc.
+	  Date of Release: May 2003
+	  End of Life: -
+	  URL: <http://www.windriver.com/>
+
+config SBS8260
+	bool "SBS8260"
+
+config RPX8260
+	bool "RPXSUPER"
+
+config TQM8260
+	bool "TQM8260"
+	---help---
+	  MPC8260 based module, slightly larger than a credit card,
+	  up to 128 MB global + 64 MB local RAM, 32 MB Flash,
+	  32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
+	  2 x serial ports, ...
+	  Manufacturer: TQ Components, www.tq-group.de
+	  Date of Release: June 2001
+	  End of Life: not yet :-)
+	  URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
+
+config ADS8272
+	bool "ADS8272"
+
+config PQ2FADS
+	bool "Freescale-PQ2FADS"
+	help
+	  Select PQ2FADS if you wish to configure for a Freescale
+	  PQ2FADS board (-VR or -ZU).
+
+config LITE5200
+	bool "Freescale LITE5200 / (IceCube)"
+	select PPC_MPC52xx
+	help
+	  Support for the LITE5200 dev board for the MPC5200 from Freescale.
+	  This is for the LITE5200 version 2.0 board; other revisions may
+	  work, but this is the only version that has been tested. The
+	  board is also known as IceCube.
+
+config MPC834x_SYS
+	bool "Freescale MPC834x SYS"
+	help
+	  This option enables support for the MPC 834x SYS evaluation board.
+
+	  Be aware that the PCI buses can only function when the SYS board
+	  is plugged into the PIB (Platform IO Board) from Freescale, which
+	  provides 3 PCI slots.  The PIB's PCI initialization is the
+	  bootloader's responsibility.
+
+config EV64360
+	bool "Marvell-EV64360BP"
+	help
+	  Select EV64360 if configuring a Marvell EV64360BP Evaluation
+	  platform.
+endchoice
+
+config PQ2ADS
+	bool
+	depends on ADS8272
+	default y
+
+config TQM8xxL
+	bool
+	depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
+	default y
+
+config PPC_MPC52xx
+	bool
+
+config 8260
+	bool "CPM2 Support" if WILLOW
+	depends on 6xx
+	default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
+	help
+	  The MPC8260 is a typical embedded CPU made by Motorola.  Selecting
+	  this option means that you wish to build a kernel for a machine with
+	  an 8260 class CPU.
+
+config 8272
+	bool
+	depends on 6xx
+	default y if ADS8272
+	select 8260
+	help
+	  The MPC8272 CPM has a different internal dpram setup than other
+	  CPM2 devices.
+
+config 83xx
+	bool
+	default y if MPC834x_SYS
+
+config MPC834x
+	bool
+	default y if MPC834x_SYS
+
+config CPM2
+	bool
+	depends on 8260 || MPC8560 || MPC8555
+	default y
+	help
+	  The CPM2 (Communications Processor Module) is a coprocessor on
+	  embedded CPUs made by Motorola.  Selecting this option means that
+	  you wish to build a kernel for a machine with a CPM2 coprocessor
+	  on it (826x, 827x, 8560).
+
+config PPC_GEN550
+	bool
+	depends on SANDPOINT || SPRUCE || PPLUS || \
+		PRPMC750 || PRPMC800 || LOPEC || \
+		(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
+		83xx
+	default y
+
+config FORCE
+	bool
+	depends on 6xx && POWERPMC250
+	default y
+
+config GT64260
+	bool
+	depends on EV64260 || CPCI690
+	default y
+
+config MV64360		# Really MV64360 & MV64460
+	bool
+	depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
+	default y
+
+config MV64X60
+	bool
+	depends on (GT64260 || MV64360)
+	default y
+
+menu "Set bridge options"
+	depends on MV64X60
+
+config NOT_COHERENT_CACHE
+	bool "Turn off Cache Coherency"
+	default n
+	help
+	  Some 64x60 bridges lock up when trying to enforce cache coherency.
+	  When this option is selected, cache coherency will be turned off.
+	  Note that this can cause other problems (e.g., stale data being
+	  speculatively loaded via a cached mapping).  Use at your own risk.
+
+config MV64X60_BASE
+	hex "Set bridge base used by firmware"
+	default "0xf1000000"
+	help
+	  The firmware may leave the base address of the bridge's registers
+	  at a non-standard location.  If so, set this value to that
+	  non-standard address.
+
+config MV64X60_NEW_BASE
+	hex "Set bridge base used by kernel"
+	default "0xf1000000"
+	help
+	  If the current base address of the bridge's registers is not where
+	  you want it, set this value to the address you want it moved to.
+	  For example, if the firmware left the bridge at 0xf1000000 but the
+	  kernel should map it at 0xf8000000, set MV64X60_BASE to the former
+	  and MV64X60_NEW_BASE to the latter.
+
+endmenu
+
+config NONMONARCH_SUPPORT
+	bool "Enable Non-Monarch Support"
+	depends on PRPMC800
+
+config HARRIER
+	bool
+	depends on PRPMC800
+	default y
+
+config EPIC_SERIAL_MODE
+	bool
+	depends on 6xx && (LOPEC || SANDPOINT)
+	default y
+
+config MPC10X_BRIDGE
+	bool
+	depends on POWERPMC250 || LOPEC || SANDPOINT
+	default y
+
+config MPC10X_OPENPIC
+	bool
+	depends on POWERPMC250 || LOPEC || SANDPOINT
+	default y
+
+config MPC10X_STORE_GATHERING
+	bool "Enable MPC10x store gathering"
+	depends on MPC10X_BRIDGE
+
+config SANDPOINT_ENABLE_UART1
+	bool "Enable DUART mode on Sandpoint"
+	depends on SANDPOINT
+	help
+	  If this option is enabled then the MPC824x processor will run
+	  in DUART mode instead of UART mode.
+
+config HARRIER_STORE_GATHERING
+	bool "Enable Harrier store gathering"
+	depends on HARRIER
+
+config MVME5100_IPMC761_PRESENT
+	bool "MVME5100 configured with an IPMC761"
+	depends on MVME5100
+
+config SPRUCE_BAUD_33M
+	bool "Spruce baud clock support"
+	depends on SPRUCE
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644
index 0000000..3d957a3
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -0,0 +1,31 @@
+
+menu "iSeries device drivers"
+	depends on PPC_ISERIES
+
+config VIOCONS
+	tristate "iSeries Virtual Console Support"
+
+config VIODASD
+	tristate "iSeries Virtual I/O disk support"
+	help
+	  If you are running on an iSeries system and you want to use
+	  virtual disks created and managed by OS/400, say Y.
+
+config VIOCD
+	tristate "iSeries Virtual I/O CD support"
+	help
+	  If you are running Linux on an IBM iSeries system and you want to
+	  read a CD drive owned by OS/400, say Y here.
+
+config VIOTAPE
+	tristate "iSeries Virtual Tape Support"
+	help
+	  If you are running Linux on an iSeries system and you want Linux
+	  to read and/or write a tape drive owned by OS/400, say Y here.
+
+endmenu
+
+config VIOPATH
+	bool
+	depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
+	default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644
index 0000000..18bf400
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -0,0 +1,7 @@
+obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
+	hvcall.o proc.o htab.o iommu.o misc.o
+obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
+obj-$(CONFIG_IBMVIO) += vio.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_VIOPATH) += viopath.o
+obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/powerpc/platforms/iseries/htab.c
similarity index 86%
rename from arch/ppc64/kernel/iSeries_htab.c
rename to arch/powerpc/platforms/iseries/htab.c
index 2192055..431b227 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -1,10 +1,10 @@
 /*
  * iSeries hashtable management.
- * 	Derived from pSeries_htab.c
+ *	Derived from pSeries_htab.c
  *
  * SMP scalability work:
  *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -18,7 +18,8 @@
 #include <asm/abs_addr.h>
 #include <linux/spinlock.h>
 
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
+static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
+	{ [0 ... 63] = SPIN_LOCK_UNLOCKED};
 
 /*
  * Very primitive algorithm for picking up a lock
@@ -84,6 +85,25 @@
 	return (secondary << 3) | (slot & 7);
 }
 
+long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
+		unsigned long va, unsigned long prpn, unsigned long vflags,
+		unsigned long rflags)
+{
+	long slot;
+	hpte_t lhpte;
+
+	slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
+
+	if (lhpte.v & HPTE_V_VALID) {
+		/* Bolt the existing HPTE */
+		HvCallHpt_setSwBits(slot, 0x10, 0);
+		HvCallHpt_setPp(slot, PP_RWXX);
+		return 0;
+	}
+
+	return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
+}
+
 static unsigned long iSeries_hpte_getword0(unsigned long slot)
 {
 	hpte_t hpte;
@@ -107,7 +127,7 @@
 		hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
 
 		if (! (hpte_v & HPTE_V_BOLTED)) {
-			HvCallHpt_invalidateSetSwBitsGet(hpte_group + 
+			HvCallHpt_invalidateSetSwBitsGet(hpte_group +
 							 slot_offset, 0, 0);
 			iSeries_hunlock(hpte_group);
 			return i;
@@ -124,9 +144,9 @@
 
 /*
  * The HyperVisor expects the "flags" argument in this form:
- * 	bits  0..59 : reserved
- * 	bit      60 : N
- * 	bits 61..63 : PP2,PP1,PP0
+ *	bits  0..59 : reserved
+ *	bit      60 : N
+ *	bits 61..63 : PP2,PP1,PP0
  */
 static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 				  unsigned long va, int large, int local)
@@ -152,7 +172,7 @@
 }
 
 /*
- * Functions used to find the PTE for a particular virtual address. 
+ * Functions used to find the PTE for a particular virtual address.
  * Only used during boot when bolting pages.
  *
  * Input : vpn      : virtual page number
@@ -170,7 +190,7 @@
 	 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
 	 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
 	 */
-	slot = HvCallHpt_findValid(&hpte, vpn); 
+	slot = HvCallHpt_findValid(&hpte, vpn);
 	if (hpte.v & HPTE_V_VALID) {
 		if (slot < 0) {
 			slot &= 0x7fffffffffffffff;
@@ -197,7 +217,7 @@
 	vsid = get_kernel_vsid(ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
 	vpn = va >> PAGE_SHIFT;
-	slot = iSeries_hpte_find(vpn); 
+	slot = iSeries_hpte_find(vpn);
 	if (slot == -1)
 		panic("updateboltedpp: Could not find page to bolt\n");
 	HvCallHpt_setPp(slot, newpp);
@@ -215,7 +235,7 @@
 	iSeries_hlock(slot);
 
 	hpte_v = iSeries_hpte_getword0(slot);
-	
+
 	if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
 		HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
 
@@ -230,7 +250,7 @@
 	ppc_md.hpte_updatepp	= iSeries_hpte_updatepp;
 	ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
 	ppc_md.hpte_insert	= iSeries_hpte_insert;
-	ppc_md.hpte_remove     	= iSeries_hpte_remove;
+	ppc_md.hpte_remove	= iSeries_hpte_remove;
 
 	htab_finish_init();
 }
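
The "flags" layout described in the hpte_updatepp comment above maps onto
the four least significant bits of a 64-bit word, since IBM bit numbering
counts from the most significant bit. A minimal sketch, not part of the
patch; the helper name is hypothetical:

	/* Compose the hypervisor protection flags: bit 60 is N,
	 * bits 61..63 are PP2,PP1,PP0 (the low four bits of the word). */
	static inline unsigned long hv_prot_flags(unsigned long n, unsigned long pp)
	{
		return ((n & 1UL) << 3) | (pp & 7UL);
	}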
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/powerpc/platforms/iseries/hvcall.S
similarity index 96%
rename from arch/ppc64/kernel/hvCall.S
rename to arch/powerpc/platforms/iseries/hvcall.S
index 4c699ea..9901c0e 100644
--- a/arch/ppc64/kernel/hvCall.S
+++ b/arch/powerpc/platforms/iseries/hvcall.S
@@ -1,7 +1,4 @@
 /*
- * arch/ppc64/kernel/hvCall.S
- *
- *
  * This file contains the code to perform calls to the
  * iSeries LPAR hypervisor
  *
@@ -16,12 +13,12 @@
 
 	.text
 
-/* 
+/*
  * Hypervisor call
- * 
+ *
  * Invoke the iSeries hypervisor via the System Call instruction
  * Parameters are passed to this routine in registers r3 - r10
- * 
+ *
  * r3 contains the HV function to be called
  * r4-r10 contain the operands to the hypervisor function
  *
@@ -41,11 +38,11 @@
 	mfcr	r0
 	std	r0,-8(r1)
 	stdu	r1,-(STACK_FRAME_OVERHEAD+16)(r1)
-	
+
 	/* r0 = 0xffffffffffffffff indicates a hypervisor call */
-	
+
 	li	r0,-1
-	
+
 	/* Invoke the hypervisor */
 
 	sc
@@ -55,7 +52,7 @@
 	mtcrf	0xff,r0
 
 	/*  return to caller, return value in r3 */
-	
+
 	blr
 
 _GLOBAL(HvCall0Ret16)
@@ -92,7 +89,5 @@
 	ld	r0,-8(r1)
 	mtcrf	0xff,r0
 	ld	r31,-16(r1)
-	
+
 	blr
-
-
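
On the C side these entry points are declared in
<asm/iSeries/HvCallSc.h> (included by ksyms.c below); a sketch of the
convention the assembly implements, with illustrative argument counts:

	/* r3 carries the hypervisor function number, r4-r10 the operands,
	 * and the return value comes back in r3. */
	extern u64 HvCall0(u64 func);
	extern u64 HvCall1(u64 func, u64 arg1);
	extern u64 HvCall2(u64 func, u64 arg1, u64 arg2);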
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/powerpc/platforms/iseries/hvlog.c
similarity index 97%
rename from arch/ppc64/kernel/HvCall.c
rename to arch/powerpc/platforms/iseries/hvlog.c
index b772e65..f61e2e9 100644
--- a/arch/ppc64/kernel/HvCall.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -1,5 +1,4 @@
 /*
- * HvCall.c
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  * 
  * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
similarity index 97%
rename from arch/ppc64/kernel/HvLpConfig.c
rename to arch/powerpc/platforms/iseries/hvlpconfig.c
index cb1d647..dc28621 100644
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ b/arch/powerpc/platforms/iseries/hvlpconfig.c
@@ -1,5 +1,4 @@
 /*
- * HvLpConfig.c
  * Copyright (C) 2001  Kyle A. Lucke, IBM Corporation
  * 
  * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/powerpc/platforms/iseries/iommu.c
similarity index 86%
rename from arch/ppc64/kernel/iSeries_iommu.c
rename to arch/powerpc/platforms/iseries/iommu.c
index f8ff1bb..9ac735d 100644
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -1,6 +1,4 @@
 /*
- * arch/ppc64/kernel/iSeries_iommu.c
- *
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  *
  * Rewrite, cleanup:
@@ -30,6 +28,7 @@
 #include <linux/list.h>
 
 #include <asm/iommu.h>
+#include <asm/tce.h>
 #include <asm/machdep.h>
 #include <asm/iSeries/HvCallXm.h>
 #include <asm/iSeries/iSeries_pci.h>
@@ -90,15 +89,17 @@
  */
 static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
 {
-	struct iSeries_Device_Node *dp;
+	struct device_node *dp;
 
 	list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) {
-		if ((dp->iommu_table != NULL) &&
-		    (dp->iommu_table->it_type == TCE_PCI) &&
-		    (dp->iommu_table->it_offset == tbl->it_offset) &&
-		    (dp->iommu_table->it_index == tbl->it_index) &&
-		    (dp->iommu_table->it_size == tbl->it_size))
-			return dp->iommu_table;
+		struct iommu_table *it = PCI_DN(dp)->iommu_table;
+
+		if ((it != NULL) &&
+		    (it->it_type == TCE_PCI) &&
+		    (it->it_offset == tbl->it_offset) &&
+		    (it->it_index == tbl->it_index) &&
+		    (it->it_size == tbl->it_size))
+			return it;
 	}
 	return NULL;
 }
@@ -112,7 +113,7 @@
  * 2. TCE table per Bus.
  * 3. TCE Table per IOA.
  */
-static void iommu_table_getparms(struct iSeries_Device_Node* dn,
+static void iommu_table_getparms(struct device_node *dn,
 				 struct iommu_table* tbl)
 {
 	struct iommu_table_cb *parms;
@@ -124,7 +125,7 @@
 	memset(parms, 0, sizeof(*parms));
 
 	parms->itc_busno = ISERIES_BUS(dn);
-	parms->itc_slotno = dn->LogicalSlot;
+	parms->itc_slotno = PCI_DN(dn)->LogicalSlot;
 	parms->itc_virtbus = 0;
 
 	HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms));
@@ -144,18 +145,19 @@
 }
 
 
-void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn)
+void iommu_devnode_init_iSeries(struct device_node *dn)
 {
 	struct iommu_table *tbl;
+	struct pci_dn *pdn = PCI_DN(dn);
 
 	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
 	iommu_table_getparms(dn, tbl);
 
 	/* Look for existing tce table */
-	dn->iommu_table = iommu_table_find(tbl);
-	if (dn->iommu_table == NULL)
-		dn->iommu_table = iommu_init_table(tbl);
+	pdn->iommu_table = iommu_table_find(tbl);
+	if (pdn->iommu_table == NULL)
+		pdn->iommu_table = iommu_init_table(tbl);
 	else
 		kfree(tbl);
 }
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/powerpc/platforms/iseries/irq.c
similarity index 97%
rename from arch/ppc64/kernel/iSeries_irq.c
rename to arch/powerpc/platforms/iseries/irq.c
index 77376c1..5a8a005 100644
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -351,3 +351,15 @@
 	irq_desc[virtirq].handler = &iSeries_IRQ_handler;
 	return virtirq;
 }
+
+int virt_irq_create_mapping(unsigned int real_irq)
+{
+	BUG(); /* Don't call this on iSeries, yet */
+
+	return 0;
+}
+
+void virt_irq_init(void)
+{
+	return;
+}
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644
index 0000000..f271b35
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -0,0 +1,27 @@
+/*
+ * (C) 2001-2005 PPC 64 Team, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+
+#include <asm/hw_irq.h>
+#include <asm/iSeries/HvCallSc.h>
+
+EXPORT_SYMBOL(HvCall0);
+EXPORT_SYMBOL(HvCall1);
+EXPORT_SYMBOL(HvCall2);
+EXPORT_SYMBOL(HvCall3);
+EXPORT_SYMBOL(HvCall4);
+EXPORT_SYMBOL(HvCall5);
+EXPORT_SYMBOL(HvCall6);
+EXPORT_SYMBOL(HvCall7);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(local_get_flags);
+EXPORT_SYMBOL(local_irq_disable);
+EXPORT_SYMBOL(local_irq_restore);
+#endif
diff --git a/arch/ppc64/kernel/LparData.c b/arch/powerpc/platforms/iseries/lpardata.c
similarity index 97%
rename from arch/ppc64/kernel/LparData.c
rename to arch/powerpc/platforms/iseries/lpardata.c
index 0a9c23c..87b7ad8 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright 2001 Mike Corrigan, IBM Corp
  *
  * This program is free software; you can redistribute it and/or
@@ -29,8 +29,8 @@
 #include <asm/iSeries/ItSpCommArea.h>
 
 
-/* The HvReleaseData is the root of the information shared between 
- * the hypervisor and Linux.  
+/* The HvReleaseData is the root of the information shared between
+ * the hypervisor and Linux.
  */
 struct HvReleaseData hvReleaseData = {
 	.xDesc = 0xc8a5d9c4,	/* "HvRD" ebcdic */
@@ -79,7 +79,7 @@
 extern void performance_monitor_iSeries(void);
 extern void data_access_slb_iSeries(void);
 extern void instruction_access_slb_iSeries(void);
-	
+
 struct ItLpNaca itLpNaca = {
 	.xDesc = 0xd397d581,		/* "LpNa" ebcdic */
 	.xSize = 0x0400,		/* size of ItLpNaca */
@@ -106,7 +106,7 @@
 	.xLoadAreaChunks = 0,		/* chunks for load area */
 	.xPaseSysCallCRMask = 0,	/* PASE mask */
 	.xSlicSegmentTablePtr = 0,	/* seg table */
-	.xOldLpQueue = { 0 }, 		/* Old LP Queue */
+	.xOldLpQueue = { 0 },		/* Old LP Queue */
 	.xInterruptHdlr = {
 		(u64)system_reset_iSeries,	/* 0x100 System Reset */
 		(u64)machine_check_iSeries,	/* 0x200 Machine Check */
@@ -134,7 +134,7 @@
 EXPORT_SYMBOL(itLpNaca);
 
 /* May be filled in by the hypervisor so cannot end up in the BSS */
-struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); 
+struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
 
 /* May be filled in by the hypervisor so cannot end up in the BSS */
 struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
@@ -151,7 +151,7 @@
 		.xPVR = 0x3600
 	}
 };
-	
+
 /* Space for Main Store Vpd 27,200 bytes */
 /* May be filled in by the hypervisor so cannot end up in the BSS */
 u64    xMsVpd[3400] __attribute__((__section__(".data")));
@@ -197,7 +197,7 @@
 		26992,			/*	 7 length of MS VPD */
 		0,			/*       8 */
 		sizeof(struct ItLpNaca),/*       9 length of LP Naca */
-		0, 			/*	10 */
+		0,			/*	10 */
 		256,			/*	11 length of Recovery Log Buf */
 		sizeof(struct SpCommArea), /*   12 length of SP Comm Area */
 		0,0,0,			/* 13 - 15 */
@@ -207,7 +207,7 @@
 		0,0			/* 24 - 25 */
 		},
 	.xSlicVpdAdrs = {			/* VPD addresses */
-		0,0,0,  		/*	 0 -  2 */
+		0,0,0,			/*	 0 -  2 */
 		&xItExtVpdPanel,        /*       3 Extended VPD */
 		&paca[0],		/*       4 first Paca */
 		0,			/*       5 */
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/powerpc/platforms/iseries/lpevents.c
similarity index 77%
rename from arch/ppc64/kernel/ItLpQueue.c
rename to arch/powerpc/platforms/iseries/lpevents.c
index 4231861..8836030 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -1,5 +1,4 @@
 /*
- * ItLpQueue.c
  * Copyright (C) 2001 Mike Corrigan  IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -19,6 +18,7 @@
 #include <asm/iSeries/ItLpQueue.h>
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
+#include <asm/iSeries/ItLpNaca.h>
 
 /*
  * The LpQueue is used to pass event data from the hypervisor to
@@ -43,7 +43,8 @@
 };
 
 /* Array of LpEvent handler functions */
-extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
+static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
 
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
@@ -199,6 +200,70 @@
 	hvlpevent_queue.xIndex = 0;
 }
 
+/* Register a handler for an LpEvent type */
+int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
+{
+	if (eventType < HvLpEvent_Type_NumTypes) {
+		lpEventHandler[eventType] = handler;
+		return 0;
+	}
+	return 1;
+}
+EXPORT_SYMBOL(HvLpEvent_registerHandler);
+
+int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
+{
+	might_sleep();
+
+	if (eventType < HvLpEvent_Type_NumTypes) {
+		if (!lpEventHandlerPaths[eventType]) {
+			lpEventHandler[eventType] = NULL;
+			/*
+			 * We now sleep until all other CPUs have scheduled.
+			 * This ensures that the deletion is seen by all
+			 * other CPUs, and that the deleted handler isn't
+			 * still running on another CPU when we return.
+			 */
+			synchronize_rcu();
+			return 0;
+		}
+	}
+	return 1;
+}
+EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
+
+/*
+ * lpIndex is the partition index of the target partition; it is
+ * needed only for VirtualIo, VirtualLan and SessionMgr.  For the
+ * other types, pass zero to use our own partition index.
+ */
+int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
+{
+	if ((eventType < HvLpEvent_Type_NumTypes) &&
+			lpEventHandler[eventType]) {
+		if (lpIndex == 0)
+			lpIndex = itLpNaca.xLpIndex;
+		HvCallEvent_openLpEventPath(lpIndex, eventType);
+		++lpEventHandlerPaths[eventType];
+		return 0;
+	}
+	return 1;
+}
+
+int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
+{
+	if ((eventType < HvLpEvent_Type_NumTypes) &&
+			lpEventHandler[eventType] &&
+			lpEventHandlerPaths[eventType]) {
+		if (lpIndex == 0)
+			lpIndex = itLpNaca.xLpIndex;
+		HvCallEvent_closeLpEventPath(lpIndex, eventType);
+		--lpEventHandlerPaths[eventType];
+		return 0;
+	}
+	return 1;
+}
+
 static int proc_lpevents_show(struct seq_file *m, void *v)
 {
 	int cpu, i;
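
A minimal sketch of how a subsystem might use the registration API added
above (the handler body and event type are illustrative; the
LpEventHandler signature of this era takes the event and a pt_regs
pointer):

	static void example_handler(struct HvLpEvent *event, struct pt_regs *regs)
	{
		/* process the event */
	}

	/* register, then open a path using our own partition index (0) */
	if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, example_handler) == 0)
		HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0);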
diff --git a/arch/ppc64/kernel/mf.c b/arch/powerpc/platforms/iseries/mf.c
similarity index 93%
rename from arch/ppc64/kernel/mf.c
rename to arch/powerpc/platforms/iseries/mf.c
index ef4a338..82f5aba 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1,29 +1,28 @@
 /*
-  * mf.c
-  * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
-  * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
-  *
-  * This modules exists as an interface between a Linux secondary partition
-  * running on an iSeries and the primary partition's Virtual Service
-  * Processor (VSP) object.  The VSP has final authority over powering on/off
-  * all partitions in the iSeries.  It also provides miscellaneous low-level
-  * machine facility type operations.
-  *
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License as published by
-  * the Free Software Foundation; either version 2 of the License, or
-  * (at your option) any later version.
-  *
-  * This program is distributed in the hope that it will be useful,
-  * but WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  * GNU General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
-  */
+ * Copyright (C) 2001 Troy D. Armstrong  IBM Corporation
+ * Copyright (C) 2004-2005 Stephen Rothwell  IBM Corporation
+ *
+ * This module exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object.  The VSP has final authority over powering on/off
+ * all partitions in the iSeries.  It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
 
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -42,6 +41,10 @@
 #include <asm/iSeries/HvLpConfig.h>
 #include <asm/iSeries/ItLpQueue.h>
 
+#include "setup.h"
+
+extern int piranha_simulator;
+
 /*
 * This is the structure layout for the Machine Facilities LPAR event
  * flows.
@@ -1279,3 +1282,35 @@
 __initcall(mf_proc_init);
 
 #endif /* CONFIG_PROC_FS */
+
+/*
+ * Get the RTC from the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
+{
+	if (piranha_simulator)
+		return;
+
+	mf_get_rtc(rtc_tm);
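+	/* the VSP reports a 1-based month; struct rtc_time uses 0-11 */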
+	rtc_tm->tm_mon--;
+}
+
+/*
+ * Set the RTC in the virtual service processor
+ * This requires flowing LpEvents to the primary partition
+ */
+int iSeries_set_rtc_time(struct rtc_time *tm)
+{
+	mf_set_rtc(tm);
+	return 0;
+}
+
+void iSeries_get_boot_time(struct rtc_time *tm)
+{
+	if (piranha_simulator)
+		return;
+
+	mf_get_boot_rtc(tm);
+	tm->tm_mon  -= 1;
+}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644
index 0000000..09f1452
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -0,0 +1,55 @@
+/*
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-2005 IBM Corp
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+	.text
+
+/* unsigned long local_save_flags(void) */
+_GLOBAL(local_get_flags)
+	lbz	r3,PACAPROCENABLED(r13)
+	blr
+
+/* unsigned long local_irq_disable(void) */
+_GLOBAL(local_irq_disable)
+	lbz	r3,PACAPROCENABLED(r13)
+	li	r4,0
+	stb	r4,PACAPROCENABLED(r13)
+	blr			/* Done */
+
+/* void local_irq_restore(unsigned long flags) */
+_GLOBAL(local_irq_restore)
+	lbz	r5,PACAPROCENABLED(r13)
+	/* Check if things are set up the way we want already. */
+	cmpw	0,r3,r5
+	beqlr
+	/* are we enabling interrupts? */
+	cmpdi	0,r3,0
+	stb	r3,PACAPROCENABLED(r13)
+	beqlr
+	/* Check pending interrupts */
+	/*   A decrementer, IPI or PMC interrupt may have occurred
+	 *   while we were in the hypervisor (which runs with
+	 *   interrupts enabled) */
+	ld	r4,PACALPPACA+LPPACAANYINT(r13)
+	cmpdi	r4,0
+	beqlr
+
+	/*
+	 * Handle pending interrupts in interrupt context
+	 */
+	li	r0,0x5555
+	sc
+	blr
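
In C terms, the lazy-enable logic of local_irq_restore above is roughly
the following (a sketch only; the field names are illustrative and the
0x5555 syscall is the special "handle pending interrupt" path):

	void local_irq_restore_sketch(unsigned long flags)
	{
		if (flags == paca->proc_enabled)
			return;			/* already in the requested state */
		paca->proc_enabled = flags;
		if (!flags)
			return;			/* disabling needs no further work */
		/* re-enabling: the hypervisor runs with interrupts on, so a
		 * decrementer, IPI or PMC interrupt may be pending in the lppaca */
		if (paca->lppaca.any_int)
			handle_pending_via_syscall();	/* li r0,0x5555; sc */
	}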
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/powerpc/platforms/iseries/pci.c
similarity index 86%
rename from arch/ppc64/kernel/iSeries_pci.c
rename to arch/powerpc/platforms/iseries/pci.c
index fbc273c..501b1dc 100644
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -1,28 +1,26 @@
 /*
- * iSeries_pci.c
- *
  * Copyright (C) 2001 Allan Trautman, IBM Corporation
  *
  * iSeries specific routines for PCI.
- * 
+ *
  * Based on code from pci.c and iSeries_pci.c 32bit
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 #include <linux/kernel.h>
-#include <linux/list.h> 
+#include <linux/list.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -43,14 +41,14 @@
 #include <asm/iSeries/iSeries_pci.h>
 #include <asm/iSeries/mf.h>
 
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 extern unsigned long io_page_mask;
 
 /*
- * Forward declares of prototypes. 
+ * Forward declarations.
  */
-static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn);
+static struct device_node *find_Device_Node(int bus, int devfn);
 static void scan_PHB_slots(struct pci_controller *Phb);
 static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
 static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
@@ -68,7 +66,7 @@
 #endif
 static long Pci_Error_Count;
 
-static int Pci_Retry_Max = 3;	/* Only retry 3 times  */	
+static int Pci_Retry_Max = 3;	/* Only retry 3 times  */
 static int Pci_Error_Flag = 1;	/* Set Retry Error on. */
 
 static struct pci_ops iSeries_pci_ops;
@@ -87,7 +85,7 @@
 /*
  * Lookup Tables.
  */
-static struct iSeries_Device_Node **iomm_table;
+static struct device_node **iomm_table;
 static u8 *iobar_table;
 
 /*
@@ -179,7 +177,7 @@
 	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
 		bar_res = &dev->resource[bar_num];
 		iomm_table_allocate_entry(dev, bar_num);
-    	}
+	}
 }
 
 /*
@@ -201,29 +199,35 @@
 /*
  * build_device_node(u16 Bus, int SubBus, u8 DevFn)
  */
-static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
+static struct device_node *build_device_node(HvBusNumber Bus,
 		HvSubBusNumber SubBus, int AgentId, int Function)
 {
-	struct iSeries_Device_Node *node;
+	struct device_node *node;
+	struct pci_dn *pdn;
 
 	PPCDBG(PPCDBG_BUSWALK,
 			"-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
 			Bus, SubBus, AgentId, Function);
 
-	node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
+	node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
 	if (node == NULL)
 		return NULL;
-
-	memset(node, 0, sizeof(struct iSeries_Device_Node));
+	memset(node, 0, sizeof(struct device_node));
+	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+	if (pdn == NULL) {
+		kfree(node);
+		return NULL;
+	}
+	node->data = pdn;
 	list_add_tail(&node->Device_List, &iSeries_Global_Device_List);
 #if 0
-	node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32);
+	pdn->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32);
 #endif
-	node->DsaAddr.DsaAddr = 0;
-	node->DsaAddr.Dsa.busNumber = Bus;
-	node->DsaAddr.Dsa.subBusNumber = SubBus;
-	node->DsaAddr.Dsa.deviceId = 0x10;
-	node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
+	pdn->DsaAddr.DsaAddr = 0;
+	pdn->DsaAddr.Dsa.busNumber = Bus;
+	pdn->DsaAddr.Dsa.subBusNumber = SubBus;
+	pdn->DsaAddr.Dsa.deviceId = 0x10;
+	pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
 	return node;
 }
 
@@ -278,28 +282,28 @@
 
 /*
  * iSeries_pcibios_init
- *  
+ *
  * A chance to initialize any structures or variables before the PCI bus walk.
  */
 void iSeries_pcibios_init(void)
 {
-	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n"); 
+	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
 	iomm_table_initialize();
 	find_and_init_phbs();
 	io_page_mask = -1;
-	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n"); 
+	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
 }
 
 /*
- * iSeries_pci_final_fixup(void)  
+ * iSeries_pci_final_fixup(void)
  */
 void __init iSeries_pci_final_fixup(void)
 {
 	struct pci_dev *pdev = NULL;
-	struct iSeries_Device_Node *node;
-    	int DeviceCount = 0;
+	struct device_node *node;
+	int DeviceCount = 0;
 
-	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); 
+	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
 
 	/* Fix up at the device node and pci_dev relationship */
 	mf_display_src(0xC9000100);
@@ -313,7 +317,7 @@
 		if (node != NULL) {
 			++DeviceCount;
 			pdev->sysdata = (void *)node;
-			node->PciDev = pdev;
+			PCI_DN(node)->pcidev = pdev;
 			PPCDBG(PPCDBG_BUSWALK,
 					"pdev 0x%p <==> DevNode 0x%p\n",
 					pdev, node);
@@ -323,7 +327,7 @@
 		} else
 			printk("PCI: Device Tree not found for 0x%016lX\n",
 					(unsigned long)pdev);
-		pdev->irq = node->Irq;
+		pdev->irq = PCI_DN(node)->Irq;
 	}
 	iSeries_activate_IRQs();
 	mf_display_src(0xC9000200);
@@ -332,24 +336,24 @@
 void pcibios_fixup_bus(struct pci_bus *PciBus)
 {
 	PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
-			PciBus->number); 
+			PciBus->number);
 }
 
 void pcibios_fixup_resources(struct pci_dev *pdev)
 {
 	PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
-}   
+}
 
 /*
- * Loop through each node function to find usable EADs bridges.  
+ * Loop through each node function to find usable EADs bridges.
  */
 static void scan_PHB_slots(struct pci_controller *Phb)
 {
 	struct HvCallPci_DeviceInfo *DevInfo;
-	HvBusNumber bus = Phb->local_number;	/* System Bus */	
+	HvBusNumber bus = Phb->local_number;	/* System Bus */
 	const HvSubBusNumber SubBus = 0;	/* EADs is always 0. */
 	int HvRc = 0;
-	int IdSel;	
+	int IdSel;
 	const int MaxAgents = 8;
 
 	DevInfo = (struct HvCallPci_DeviceInfo*)
@@ -358,10 +362,10 @@
 		return;
 
 	/*
-	 * Probe for EADs Bridges      
+	 * Probe for EADs Bridges
 	 */
 	for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
-    		HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
+		HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
 				ISERIES_HV_ADDR(DevInfo),
 				sizeof(struct HvCallPci_DeviceInfo));
 		if (HvRc == 0) {
@@ -393,19 +397,19 @@
 
 	/* Note: hvSubBus and irq are always 0 at this level! */
 	for (Function = 0; Function < 8; ++Function) {
-	  	AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
+		AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
 		HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
- 		if (HvRc == 0) {
+		if (HvRc == 0) {
 			printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
 			       bus, IdSel, Function, AgentId);
-  			/*  Connect EADs: 0x18.00.12 = 0x00 */
+			/*  Connect EADs: 0x18.00.12 = 0x00 */
 			PPCDBG(PPCDBG_BUSWALK,
 					"PCI:Connect EADs: 0x%02X.%02X.%02X\n",
 					bus, SubBus, AgentId);
-	    		HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
+			HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
 					ISERIES_HV_ADDR(BridgeInfo),
 					sizeof(struct HvCallPci_BridgeInfo));
-	 		if (HvRc == 0) {
+			if (HvRc == 0) {
 				printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
 					BridgeInfo->busUnitInfo.deviceType,
 					BridgeInfo->subBusNumber,
@@ -428,7 +432,7 @@
 					printk("PCI: Invalid Bridge Configuration(0x%02X)",
 						BridgeInfo->busUnitInfo.deviceType);
 			}
-    		} else if (HvRc != 0x000B)
+		} else if (HvRc != 0x000B)
 			pci_Log_Error("EADs Connect",
 					bus, SubBus, AgentId, HvRc);
 	}
@@ -441,7 +445,7 @@
 static int scan_bridge_slot(HvBusNumber Bus,
 		struct HvCallPci_BridgeInfo *BridgeInfo)
 {
-	struct iSeries_Device_Node *node;
+	struct device_node *node;
 	HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
 	u16 VendorId = 0;
 	int HvRc = 0;
@@ -451,16 +455,16 @@
 	HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
 
 	/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
-  	Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
+	Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
 	PPCDBG(PPCDBG_BUSWALK,
 		"PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
 		Bus, 0, EADsIdSel, Irq);
 
 	/*
-	 * Connect all functions of any device found.  
+	 * Connect all functions of any device found.
 	 */
-  	for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
-    		for (Function = 0; Function < 8; ++Function) {
+	for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
+		for (Function = 0; Function < 8; ++Function) {
 			HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
 			HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
 					AgentId, Irq);
@@ -484,15 +488,15 @@
 			       "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
 			       Bus, SubBus, AgentId, VendorId, Irq);
 			HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
-						      PCI_INTERRUPT_LINE, Irq);  
+						      PCI_INTERRUPT_LINE, Irq);
 			if (HvRc != 0)
 				pci_Log_Error("PciCfgStore Irq Failed!",
 					      Bus, SubBus, AgentId, HvRc);
 
 			++DeviceCount;
 			node = build_device_node(Bus, SubBus, EADsIdSel, Function);
-			node->Irq = Irq;
-			node->LogicalSlot = BridgeInfo->logicalSlotNumber;
+			PCI_DN(node)->Irq = Irq;
+			PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
 
 		} /* for (Function = 0; Function < 8; ++Function) */
 	} /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
@@ -542,15 +546,16 @@
 /*
  * Look down the chain to find the matching device node
  */
-static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
+static struct device_node *find_Device_Node(int bus, int devfn)
 {
 	struct list_head *pos;
 
 	list_for_each(pos, &iSeries_Global_Device_List) {
-		struct iSeries_Device_Node *node =
-			list_entry(pos, struct iSeries_Device_Node, Device_List);
+		struct device_node *node =
+			list_entry(pos, struct device_node, Device_List);
 
-		if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
+		if ((bus == ISERIES_BUS(node)) &&
+				(devfn == PCI_DN(node)->devfn))
 			return node;
 	}
 	return NULL;
@@ -562,12 +567,12 @@
  * Sanity-check the node's PciDev against the passed pci_dev.
  * If none is found, returns a NULL which the client must handle.
  */
-static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev)
+static struct device_node *get_Device_Node(struct pci_dev *pdev)
 {
-	struct iSeries_Device_Node *node;
+	struct device_node *node;
 
 	node = pdev->sysdata;
-	if (node == NULL || node->PciDev != pdev)
+	if (node == NULL || PCI_DN(node)->pcidev != pdev)
 		node = find_Device_Node(pdev->bus->number, pdev->devfn);
 	return node;
 }
@@ -595,7 +600,7 @@
 static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
 		int offset, int size, u32 *val)
 {
-	struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
+	struct device_node *node = find_Device_Node(bus->number, devfn);
 	u64 fn;
 	struct HvCallPci_LoadReturn ret;
 
@@ -607,7 +612,7 @@
 	}
 
 	fn = hv_cfg_read_func[(size - 1) & 3];
-	HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0);
+	HvCall3Ret16(fn, &ret, PCI_DN(node)->DsaAddr.DsaAddr, offset, 0);
 
 	if (ret.rc != 0) {
 		*val = ~0;
@@ -625,7 +630,7 @@
 static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
 		int offset, int size, u32 val)
 {
-	struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
+	struct device_node *node = find_Device_Node(bus->number, devfn);
 	u64 fn;
 	u64 ret;
 
@@ -635,7 +640,7 @@
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
 	fn = hv_cfg_write_func[(size - 1) & 3];
-	ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0);
+	ret = HvCall4(fn, PCI_DN(node)->DsaAddr.DsaAddr, offset, val, 0);
 
 	if (ret != 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -657,14 +662,16 @@
  * PCI: Device 23.90 ReadL Retry( 1)
  * PCI: Device 23.90 ReadL Retry Successful(1)
  */
-static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
+static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
 		int *retry, u64 ret)
 {
 	if (ret != 0)  {
+		struct pci_dn *pdn = PCI_DN(DevNode);
+
 		++Pci_Error_Count;
 		(*retry)++;
 		printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
-				TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn,
+				TextHdr, pdn->DsaAddr.Dsa.busNumber, pdn->devfn,
 				*retry, (int)ret);
 		/*
 		 * Bump the retry and check for retry count exceeded.
@@ -687,14 +694,14 @@
  * Note: Make sure the passed variables end up on the stack to avoid
  * being exposed as device globals.
  */
-static inline struct iSeries_Device_Node *xlate_iomm_address(
+static inline struct device_node *xlate_iomm_address(
 		const volatile void __iomem *IoAddress,
 		u64 *dsaptr, u64 *BarOffsetPtr)
 {
 	unsigned long OrigIoAddr;
 	unsigned long BaseIoAddr;
 	unsigned long TableIndex;
-	struct iSeries_Device_Node *DevNode;
+	struct device_node *DevNode;
 
 	OrigIoAddr = (unsigned long __force)IoAddress;
 	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
@@ -705,7 +712,7 @@
 
 	if (DevNode != NULL) {
 		int barnum = iobar_table[TableIndex];
-		*dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24);
+		*dsaptr = PCI_DN(DevNode)->DsaAddr.DsaAddr | (barnum << 24);
 		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
 	} else
 		panic("PCI: Invalid PCI IoAddress detected!\n");
@@ -727,7 +734,7 @@
 	u64 dsa;
 	int retry = 0;
 	struct HvCallPci_LoadReturn ret;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
@@ -757,7 +764,7 @@
 	u64 dsa;
 	int retry = 0;
 	struct HvCallPci_LoadReturn ret;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
@@ -788,7 +795,7 @@
 	u64 dsa;
 	int retry = 0;
 	struct HvCallPci_LoadReturn ret;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
@@ -826,7 +833,7 @@
 	u64 dsa;
 	int retry = 0;
 	u64 rc;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
@@ -854,7 +861,7 @@
 	u64 dsa;
 	int retry = 0;
 	u64 rc;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
@@ -882,7 +889,7 @@
 	u64 dsa;
 	int retry = 0;
 	u64 rc;
-	struct iSeries_Device_Node *DevNode =
+	struct device_node *DevNode =
 		xlate_iomm_address(IoAddress, &dsa, &BarOffset);
 
 	if (DevNode == NULL) {
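
The DSA ("direct select address") that these accessors hand to the
hypervisor packs the bus topology into the upper bytes of a 64-bit word.
A sketch of the layout, taken from build_device_node() and
xlate_iomm_address() above:

	dsa  = ((u64)bus << 48) | ((u64)sub_bus << 40) | ((u64)0x10 << 32);
	dsa |= (u64)barnum << 24;	/* the I/O accessors OR in the BAR number */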
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/powerpc/platforms/iseries/proc.c
similarity index 92%
rename from arch/ppc64/kernel/iSeries_proc.c
rename to arch/powerpc/platforms/iseries/proc.c
index 0fe3116..d46b473 100644
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -1,5 +1,4 @@
 /*
- * iSeries_proc.c
  * Copyright (C) 2001  Kyle A. Lucke IBM Corporation
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
  *
@@ -68,12 +67,15 @@
 		unsigned long tb_ticks = (tb0 - startTb);
 		unsigned long titan_jiffies = titan_usec / (1000000/HZ);
 		unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
-		unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec;
+		unsigned long titan_jiff_rem_usec =
+			titan_usec - titan_jiff_usec;
 		unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
 		unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
 		unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
-		unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec;
-		unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec;
+		unsigned long tb_jiff_rem_usec =
+			tb_jiff_rem_ticks / tb_ticks_per_usec;
+		unsigned long new_tb_ticks_per_jiffy =
+			(tb_ticks * (1000000/HZ))/titan_usec;
 
 		seq_printf(m, "  titan elapsed = %lu uSec\n", titan_usec);
 		seq_printf(m, "  tb elapsed    = %lu ticks\n", tb_ticks);
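
As a worked example of the recalibration arithmetic above: with HZ=100,
if 1,000,000 titan microseconds elapse while the timebase advances
5,000,000 ticks, then titan_jiffies = 1,000,000 / 10,000 = 100 and
new_tb_ticks_per_jiffy = (5,000,000 * 10,000) / 1,000,000 = 50,000.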
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/powerpc/platforms/iseries/setup.c
similarity index 76%
rename from arch/ppc64/kernel/iSeries_setup.c
rename to arch/powerpc/platforms/iseries/setup.c
index 3ffefbb..ad78c85 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -2,8 +2,6 @@
  *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
  *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
  *
- *    Module name: iSeries_setup.c
- *
  *    Description:
  *      Architecture- / platform-specific boot-time initialization code for
  *      the IBM iSeries LPAR.  Adapted from original code by Grant Erickson and
@@ -42,7 +40,6 @@
 #include <asm/firmware.h>
 
 #include <asm/time.h>
-#include "iSeries_setup.h"
 #include <asm/naca.h>
 #include <asm/paca.h>
 #include <asm/cache.h>
@@ -62,6 +59,8 @@
 #include <asm/iSeries/ItVpdAreas.h>
 #include <asm/iSeries/LparMap.h>
 
+#include "setup.h"
+
 extern void hvlog(char *fmt, ...);
 
 #ifdef DEBUG
@@ -74,8 +73,8 @@
 extern void ppcdbg_initialize(void);
 
 static void build_iSeries_Memory_Map(void);
-static void setup_iSeries_cache_sizes(void);
-static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
+static int iseries_shared_idle(void);
+static int iseries_dedicated_idle(void);
 #ifdef CONFIG_PCI
 extern void iSeries_pci_final_fixup(void);
 #else
@@ -83,14 +82,6 @@
 #endif
 
 /* Global Variables */
-static unsigned long procFreqHz;
-static unsigned long procFreqMhz;
-static unsigned long procFreqMhzHundreths;
-
-static unsigned long tbFreqHz;
-static unsigned long tbFreqMhz;
-static unsigned long tbFreqMhzHundreths;
-
 int piranha_simulator;
 
 extern int rd_size;		/* Defined in drivers/block/rd.c */
@@ -319,6 +310,8 @@
 
 	ppcdbg_initialize();
 
+	ppc64_interrupt_controller = IC_ISERIES;
+
 #if defined(CONFIG_BLK_DEV_INITRD)
 	/*
 	 * If the init RAM disk has been configured and there is
@@ -341,12 +334,6 @@
 	iSeries_recal_titan = HvCallXm_loadTod();
 
 	/*
-	 * Cache sizes must be initialized before hpte_init_iSeries is called
-	 * as the later need them for flush_icache_range()
-	 */
-	setup_iSeries_cache_sizes();
-
-	/*
 	 * Initialize the hash table management pointers
 	 */
 	hpte_init_iSeries();
@@ -356,12 +343,6 @@
 	 */
 	iommu_init_early_iSeries();
 
-	/*
-	 * Initialize the table which translate Linux physical addresses to
-	 * AS/400 absolute addresses
-	 */
-	build_iSeries_Memory_Map();
-
 	iSeries_get_cmdline();
 
 	/* Save unparsed command line copy for /proc/cmdline */
@@ -379,14 +360,6 @@
 		}
 	}
 
-	/* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
-	iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
-
-	lmb_init();
-	lmb_add(0, systemcfg->physicalMemorySize);
-	lmb_analyze();
-	lmb_reserve(0, __pa(klimit));
-
 	/* Initialize machine-dependency vectors */
 #ifdef CONFIG_SMP
 	smp_init_iSeries();
@@ -592,139 +565,28 @@
 }
 
 /*
- * Set up the variables that describe the cache line sizes
- * for this machine.
- */
-static void __init setup_iSeries_cache_sizes(void)
-{
-	unsigned int i, n;
-	unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
-
-	systemcfg->icache_size =
-	ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
-	systemcfg->icache_line_size =
-	ppc64_caches.iline_size =
-		xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
-	systemcfg->dcache_size =
-	ppc64_caches.dsize =
-		xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
-	systemcfg->dcache_line_size =
-	ppc64_caches.dline_size =
-		xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
-	ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
-	ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
-
-	i = ppc64_caches.iline_size;
-	n = 0;
-	while ((i = (i / 2)))
-		++n;
-	ppc64_caches.log_iline_size = n;
-
-	i = ppc64_caches.dline_size;
-	n = 0;
-	while ((i = (i / 2)))
-		++n;
-	ppc64_caches.log_dline_size = n;
-
-	printk("D-cache line size = %d\n",
-			(unsigned int)ppc64_caches.dline_size);
-	printk("I-cache line size = %d\n",
-			(unsigned int)ppc64_caches.iline_size);
-}
-
-/*
- * Create a pte. Used during initialization only.
- */
-static void iSeries_make_pte(unsigned long va, unsigned long pa,
-			     int mode)
-{
-	hpte_t local_hpte, rhpte;
-	unsigned long hash, vpn;
-	long slot;
-
-	vpn = va >> PAGE_SHIFT;
-	hash = hpt_hash(vpn, 0);
-
-	local_hpte.r = pa | mode;
-	local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
-		| HPTE_V_BOLTED | HPTE_V_VALID;
-
-	slot = HvCallHpt_findValid(&rhpte, vpn);
-	if (slot < 0) {
-		/* Must find space in primary group */
-		panic("hash_page: hpte already exists\n");
-	}
-	HvCallHpt_addValidate(slot, 0, &local_hpte);
-}
-
-/*
- * Bolt the kernel addr space into the HPT
- */
-static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
-{
-	unsigned long pa;
-	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
-	hpte_t hpte;
-
-	for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
-		unsigned long ea = (unsigned long)__va(pa);
-		unsigned long vsid = get_kernel_vsid(ea);
-		unsigned long va = (vsid << 28) | (pa & 0xfffffff);
-		unsigned long vpn = va >> PAGE_SHIFT;
-		unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
-
-		/* Make non-kernel text non-executable */
-		if (!in_kernel_text(ea))
-			mode_rw |= HW_NO_EXEC;
-
-		if (hpte.v & HPTE_V_VALID) {
-			/* HPTE exists, so just bolt it */
-			HvCallHpt_setSwBits(slot, 0x10, 0);
-			/* And make sure the pp bits are correct */
-			HvCallHpt_setPp(slot, PP_RWXX);
-		} else
-			/* No HPTE exists, so create a new bolted one */
-			iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
-	}
-}
-
-/*
  * Document me.
  */
 static void __init iSeries_setup_arch(void)
 {
 	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
 
-	/* Add an eye catcher and the systemcfg layout version number */
-	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
-	systemcfg->version.major = SYSTEMCFG_MAJOR;
-	systemcfg->version.minor = SYSTEMCFG_MINOR;
+	if (get_paca()->lppaca.shared_proc) {
+		ppc_md.idle_loop = iseries_shared_idle;
+		printk(KERN_INFO "Using shared processor idle loop\n");
+	} else {
+		ppc_md.idle_loop = iseries_dedicated_idle;
+		printk(KERN_INFO "Using dedicated idle loop\n");
+	}
 
 	/* Setup the Lp Event Queue */
 	setup_hvlpevent_queue();
 
-	/* Compute processor frequency */
-	procFreqHz = ((1UL << 34) * 1000000) /
-			xIoHriProcessorVpd[procIx].xProcFreq;
-	procFreqMhz = procFreqHz / 1000000;
-	procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
-	ppc_proc_freq = procFreqHz;
-
-	/* Compute time base frequency */
-	tbFreqHz = ((1UL << 32) * 1000000) /
-		xIoHriProcessorVpd[procIx].xTimeBaseFreq;
-	tbFreqMhz = tbFreqHz / 1000000;
-	tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
-	ppc_tb_freq = tbFreqHz;
-
 	printk("Max  logical processors = %d\n",
 			itVpdAreas.xSlicMaxLogicalProcs);
 	printk("Max physical processors = %d\n",
 			itVpdAreas.xSlicMaxPhysicalProcs);
-	printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
-			procFreqMhzHundreths);
-	printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
-			tbFreqMhzHundreths);
+
 	systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
 	printk("Processor version = %x\n", systemcfg->processor);
 }
@@ -768,49 +630,6 @@
 	mf_power_off();
 }
 
-/*
- * void __init iSeries_calibrate_decr()
- *
- * Description:
- *   This routine retrieves the internal processor frequency from the VPD,
- *   and sets up the kernel timer decrementer based on that value.
- *
- */
-static void __init iSeries_calibrate_decr(void)
-{
-	unsigned long	cyclesPerUsec;
-	struct div_result divres;
-
-	/* Compute decrementer (and TB) frequency in cycles/sec */
-	cyclesPerUsec = ppc_tb_freq / 1000000;
-
-	/*
-	 * Set the amount to refresh the decrementer by.  This
-	 * is the number of decrementer ticks it takes for
-	 * 1/HZ seconds.
-	 */
-	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
-
-#if 0
-	/* TEST CODE FOR ADJTIME */
-	tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
-	/* END OF TEST CODE */
-#endif
-
-	/*
-	 * tb_ticks_per_sec = freq; would give better accuracy
-	 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
-	 * that jiffies (and xtime) will match the time returned
-	 * by do_gettimeofday.
-	 */
-	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
-	tb_ticks_per_usec = cyclesPerUsec;
-	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
-	div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
-	tb_to_xs = divres.result_low;
-	setup_default_decr();
-}
-
 static void __init iSeries_progress(char * st, unsigned short code)
 {
 	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
@@ -942,36 +761,246 @@
 void __init iSeries_init_IRQ(void) { }
 #endif
 
-void __init iSeries_early_setup(void)
+static int __init iseries_probe(int platform)
 {
-	iSeries_fixup_klimit();
+	return PLATFORM_ISERIES_LPAR == platform;
+}
 
-	ppc_md.setup_arch = iSeries_setup_arch;
-	ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
-	ppc_md.init_IRQ = iSeries_init_IRQ;
-	ppc_md.get_irq = iSeries_get_irq;
-	ppc_md.init_early = iSeries_init_early,
-
-	ppc_md.pcibios_fixup  = iSeries_pci_final_fixup;
-
-	ppc_md.restart = iSeries_restart;
-	ppc_md.power_off = iSeries_power_off;
-	ppc_md.halt = iSeries_halt;
-
-	ppc_md.get_boot_time = iSeries_get_boot_time;
-	ppc_md.set_rtc_time = iSeries_set_rtc_time;
-	ppc_md.get_rtc_time = iSeries_get_rtc_time;
-	ppc_md.calibrate_decr = iSeries_calibrate_decr;
-	ppc_md.progress = iSeries_progress;
-
+struct machdep_calls __initdata iseries_md = {
+	.setup_arch	= iSeries_setup_arch,
+	.get_cpuinfo	= iSeries_get_cpuinfo,
+	.init_IRQ	= iSeries_init_IRQ,
+	.get_irq	= iSeries_get_irq,
+	.init_early	= iSeries_init_early,
+	.pcibios_fixup	= iSeries_pci_final_fixup,
+	.restart	= iSeries_restart,
+	.power_off	= iSeries_power_off,
+	.halt		= iSeries_halt,
+	.get_boot_time	= iSeries_get_boot_time,
+	.set_rtc_time	= iSeries_set_rtc_time,
+	.get_rtc_time	= iSeries_get_rtc_time,
+	.calibrate_decr	= generic_calibrate_decr,
+	.progress	= iSeries_progress,
+	.probe		= iseries_probe,
 	/* XXX Implement enable_pmcs for iSeries */
+};
 
-	if (get_paca()->lppaca.shared_proc) {
-		ppc_md.idle_loop = iseries_shared_idle;
-		printk(KERN_INFO "Using shared processor idle loop\n");
-	} else {
-		ppc_md.idle_loop = iseries_dedicated_idle;
-		printk(KERN_INFO "Using dedicated idle loop\n");
+struct blob {
+	unsigned char data[PAGE_SIZE];
+	unsigned long next;
+};
+
+struct iseries_flat_dt {
+	struct boot_param_header header;
+	u64 reserve_map[2];
+	struct blob dt;
+	struct blob strings;
+};
+
+struct iseries_flat_dt iseries_dt;
+
+void dt_init(struct iseries_flat_dt *dt)
+{
+	dt->header.off_mem_rsvmap =
+		offsetof(struct iseries_flat_dt, reserve_map);
+	dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
+	dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
+	dt->header.totalsize = sizeof(struct iseries_flat_dt);
+	dt->header.dt_strings_size = sizeof(struct blob);
+
+	/* There is no notion of hardware cpu id on iSeries */
+	dt->header.boot_cpuid_phys = smp_processor_id();
+
+	dt->dt.next = (unsigned long)&dt->dt.data;
+	dt->strings.next = (unsigned long)&dt->strings.data;
+
+	dt->header.magic = OF_DT_HEADER;
+	dt->header.version = 0x10;
+	dt->header.last_comp_version = 0x10;
+
+	dt->reserve_map[0] = 0;
+	dt->reserve_map[1] = 0;
+}
+
+void dt_check_blob(struct blob *b)
+{
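+	/* The next field sits immediately after data[], so once the write
+	 * cursor reaches next's own address the blob has overflowed. */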
+	if (b->next >= (unsigned long)&b->next) {
+		DBG("Ran out of space in flat device tree blob!\n");
+		BUG();
 	}
 }
 
+void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
+{
+	*((u32*)dt->dt.next) = value;
+	dt->dt.next += sizeof(u32);
+
+	dt_check_blob(&dt->dt);
+}
+
+void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
+{
+	*((u64*)dt->dt.next) = value;
+	dt->dt.next += sizeof(u64);
+
+	dt_check_blob(&dt->dt);
+}
+
+unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
+{
+	unsigned long start = blob->next - (unsigned long)blob->data;
+
+	memcpy((char *)blob->next, data, len);
+	blob->next = _ALIGN(blob->next + len, 4);
+
+	dt_check_blob(blob);
+
+	return start;
+}
+
+void dt_start_node(struct iseries_flat_dt *dt, char *name)
+{
+	dt_push_u32(dt, OF_DT_BEGIN_NODE);
+	dt_push_bytes(&dt->dt, name, strlen(name) + 1);
+}
+
+#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
+
+void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
+{
+	unsigned long offset;
+
+	dt_push_u32(dt, OF_DT_PROP);
+
+	/* Length of the data */
+	dt_push_u32(dt, len);
+
+	/* Put the property name in the string blob. */
+	offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
+
+	/* The offset of the property's name in the string blob. */
+	dt_push_u32(dt, (u32)offset);
+
+	/* The actual data. */
+	dt_push_bytes(&dt->dt, data, len);
+}
+
+void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
+{
+	dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for the trailing NUL */
+}
+
+void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
+{
+	dt_prop(dt, name, (char *)&data, sizeof(u32));
+}
+
+void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
+{
+	dt_prop(dt, name, (char *)&data, sizeof(u64));
+}
+
+void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
+{
+	dt_prop(dt, name, (char *)data, sizeof(u64) * n);
+}
+
+void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
+{
+	dt_prop(dt, name, NULL, 0);
+}
+
+void dt_cpus(struct iseries_flat_dt *dt)
+{
+	unsigned char buf[32];
+	unsigned char *p;
+	unsigned int i, index;
+	struct IoHriProcessorVpd *d;
+
+	/* yuck */
+	snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
+	p = strchr(buf, ' ');
+	if (!p)
+		p = buf + strlen(buf);
+
+	dt_start_node(dt, "cpus");
+	dt_prop_u32(dt, "#address-cells", 1);
+	dt_prop_u32(dt, "#size-cells", 0);
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (paca[i].lppaca.dyn_proc_status >= 2)
+			continue;
+
+		snprintf(p, 32 - (p - buf), "@%d", i);
+		dt_start_node(dt, buf);
+
+		dt_prop_str(dt, "device_type", "cpu");
+
+		index = paca[i].lppaca.dyn_hv_phys_proc_index;
+		d = &xIoHriProcessorVpd[index];
+
+		dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
+		dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
+
+		dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
+		dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
+
+		/* magic conversions to Hz copied from old code */
+		dt_prop_u32(dt, "clock-frequency",
+			((1UL << 34) * 1000000) / d->xProcFreq);
+		dt_prop_u32(dt, "timebase-frequency",
+			((1UL << 32) * 1000000) / d->xTimeBaseFreq);
+
+		dt_prop_u32(dt, "reg", i);
+
+		dt_end_node(dt);
+	}
+
+	dt_end_node(dt);
+}
+
+void build_flat_dt(struct iseries_flat_dt *dt)
+{
+	u64 tmp[2];
+
+	dt_init(dt);
+
+	dt_start_node(dt, "");
+
+	dt_prop_u32(dt, "#address-cells", 2);
+	dt_prop_u32(dt, "#size-cells", 2);
+
+	/* /memory */
+	dt_start_node(dt, "memory@0");
+	dt_prop_str(dt, "name", "memory");
+	dt_prop_str(dt, "device_type", "memory");
+	tmp[0] = 0;
+	tmp[1] = systemcfg->physicalMemorySize;
+	dt_prop_u64_list(dt, "reg", tmp, 2);
+	dt_end_node(dt);
+
+	/* /chosen */
+	dt_start_node(dt, "chosen");
+	dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
+	dt_end_node(dt);
+
+	dt_cpus(dt);
+
+	dt_end_node(dt);
+
+	dt_push_u32(dt, OF_DT_END);
+}
+
+void * __init iSeries_early_setup(void)
+{
+	iSeries_fixup_klimit();
+
+	/*
+	 * Initialize the table which translates Linux physical addresses to
+	 * AS/400 absolute addresses.
+	 */
+	build_iSeries_Memory_Map();
+
+	build_flat_dt(&iseries_dt);
+
+	return (void *) __pa(&iseries_dt);
+}
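The helpers above hand-roll the flattened device-tree ("blob") encoding: each
property is written into the structure blob as an OF_DT_PROP tag, the data
length, the offset of the property name in a separate strings blob, and then
the 4-byte-aligned data. A minimal user-space sketch of that record layout,
for illustration only (the OF_DT_PROP value matches the usual flat-tree
definition, but the buffer sizes and main() here are invented, and there are
no overflow checks, unlike dt_check_blob() above):

    /* Standalone sketch of the dt_prop() record encoding; not kernel code. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define OF_DT_PROP 0x3	/* property tag, as in the flat-tree format */

    static unsigned char dtb[256], strs[64];
    static unsigned long dt_next, str_next;

    static void push_u32(uint32_t v)
    {
        memcpy(dtb + dt_next, &v, sizeof(v));
        dt_next += sizeof(v);
    }

    /* Append a NUL-terminated name to the strings blob, return its offset. */
    static unsigned long push_name(const char *name)
    {
        unsigned long off = str_next;

        strcpy((char *)strs + str_next, name);
        str_next = (str_next + strlen(name) + 1 + 3) & ~3UL;
        return off;
    }

    /* Mirror of dt_prop(): tag, length, name offset, then aligned data. */
    static void prop(const char *name, const void *data, unsigned int len)
    {
        push_u32(OF_DT_PROP);
        push_u32(len);
        push_u32(push_name(name));
        memcpy(dtb + dt_next, data, len);
        dt_next = (dt_next + len + 3) & ~3UL;
    }

    int main(void)
    {
        uint32_t one = 1;

        prop("#address-cells", &one, sizeof(one));
        printf("structure blob: %lu bytes, strings blob: %lu bytes\n",
               dt_next, str_next);
        return 0;
    }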
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/powerpc/platforms/iseries/setup.h
similarity index 95%
rename from arch/ppc64/kernel/iSeries_setup.h
rename to arch/powerpc/platforms/iseries/setup.h
index c6eb29a..6da89ae 100644
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -2,8 +2,6 @@
  *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
  *    Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
  *
- *    Module name: as400_setup.h
- *
  *    Description:
  *      Architecture- / platform-specific boot-time initialization code for
  *      the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/powerpc/platforms/iseries/smp.c
similarity index 73%
rename from arch/ppc64/kernel/iSeries_smp.c
rename to arch/powerpc/platforms/iseries/smp.c
index f74386e..f720916 100644
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -47,17 +47,17 @@
 
 static unsigned long iSeries_smp_message[NR_CPUS];
 
-void iSeries_smp_message_recv( struct pt_regs * regs )
+void iSeries_smp_message_recv(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 	int msg;
 
-	if ( num_online_cpus() < 2 )
+	if (num_online_cpus() < 2)
 		return;
 
-	for ( msg = 0; msg < 4; ++msg )
-		if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
-			smp_message_recv( msg, regs );
+	for (msg = 0; msg < 4; msg++)
+		if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
+			smp_message_recv(msg, regs);
 }
 
 static inline void smp_iSeries_do_message(int cpu, int msg)
@@ -74,48 +74,22 @@
 		smp_iSeries_do_message(target, msg);
 	else {
 		for_each_online_cpu(i) {
-			if (target == MSG_ALL_BUT_SELF
-			    && i == smp_processor_id())
+			if ((target == MSG_ALL_BUT_SELF) &&
+					(i == smp_processor_id()))
 				continue;
 			smp_iSeries_do_message(i, msg);
 		}
 	}
 }
 
-static int smp_iSeries_numProcs(void)
-{
-	unsigned np, i;
-
-	np = 0;
-        for (i=0; i < NR_CPUS; ++i) {
-                if (paca[i].lppaca.dyn_proc_status < 2) {
-			cpu_set(i, cpu_possible_map);
-			cpu_set(i, cpu_present_map);
-			cpu_set(i, cpu_sibling_map[i]);
-                        ++np;
-                }
-        }
-	return np;
-}
-
 static int smp_iSeries_probe(void)
 {
-	unsigned i;
-	unsigned np = 0;
-
-	for (i=0; i < NR_CPUS; ++i) {
-		if (paca[i].lppaca.dyn_proc_status < 2) {
-			/*paca[i].active = 1;*/
-			++np;
-		}
-	}
-
-	return np;
+	return cpus_weight(cpu_possible_map);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
 {
-	BUG_ON(nr < 0 || nr >= NR_CPUS);
+	BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
 	/* Verify that our partition has a processor nr */
 	if (paca[nr].lppaca.dyn_proc_status >= 2)
@@ -144,6 +118,4 @@
 void __init smp_init_iSeries(void)
 {
 	smp_ops = &iSeries_smp_ops;
-	systemcfg->processorCount	= smp_iSeries_numProcs();
 }
-
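The message-passing path above is the classic per-CPU bitmask IPI idiom: the
sender atomically sets a message bit in the target CPU's word (and kicks it),
and the receiver drains the word with test_and_clear_bit. A user-space sketch
of the same idiom, illustration only, with GCC __atomic builtins standing in
for the kernel's bitops:

    /* Standalone sketch of the iSeries_smp_message idiom; not kernel code. */
    #include <stdio.h>

    #define NR_MSGS 4

    static unsigned long message[2];	/* one word per simulated CPU */

    static void send_msg(int cpu, int msg)
    {
        __atomic_fetch_or(&message[cpu], 1UL << msg, __ATOMIC_SEQ_CST);
        /* the real code would now raise an inter-processor interrupt */
    }

    static void recv_msgs(int cpu)
    {
        int msg;

        for (msg = 0; msg < NR_MSGS; msg++) {
            unsigned long bit = 1UL << msg;
            unsigned long old;

            /* test_and_clear_bit() equivalent */
            old = __atomic_fetch_and(&message[cpu], ~bit, __ATOMIC_SEQ_CST);
            if (old & bit)
                printf("cpu %d: message %d\n", cpu, msg);
        }
    }

    int main(void)
    {
        send_msg(1, 2);
        recv_msgs(1);
        return 0;
    }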
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/powerpc/platforms/iseries/vio.c
similarity index 98%
rename from arch/ppc64/kernel/iSeries_vio.c
rename to arch/powerpc/platforms/iseries/vio.c
index 6b754b0..c0f7d2e 100644
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -14,6 +14,7 @@
 
 #include <asm/vio.h>
 #include <asm/iommu.h>
+#include <asm/tce.h>
 #include <asm/abs_addr.h>
 #include <asm/page.h>
 #include <asm/iSeries/vio.h>
diff --git a/arch/ppc64/kernel/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
similarity index 99%
rename from arch/ppc64/kernel/viopath.c
rename to arch/powerpc/platforms/iseries/viopath.c
index 2a6c4f0..c0c767b 100644
--- a/arch/ppc64/kernel/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -1,5 +1,4 @@
 /* -*- linux-c -*-
- *  arch/ppc64/kernel/viopath.c
  *
  *  iSeries Virtual I/O Message Path code
  *
@@ -7,7 +6,7 @@
  *           Ryan Arnold <ryanarn@us.ibm.com>
  *           Colin Devilbiss <devilbis@us.ibm.com>
  *
- * (C) Copyright 2000-2003 IBM Corporation
+ * (C) Copyright 2000-2005 IBM Corporation
  *
  * This code is used by the iSeries virtual disk, cd,
  * tape, and console to communicate with OS/400 in another
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c
similarity index 97%
rename from arch/ppc64/kernel/iSeries_VpdInfo.c
rename to arch/powerpc/platforms/iseries/vpdinfo.c
index 5d92179..d8a6796 100644
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ b/arch/powerpc/platforms/iseries/vpdinfo.c
@@ -1,6 +1,4 @@
 /*
- * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb  2 2001.
- *
  * This code gets the card location of the hardware
  * Copyright (C) 2001  <Allan H Trautman> <IBM Corp>
  * Copyright (C) 2005  Stephen Rothwel, IBM Corp
@@ -242,7 +240,7 @@
  */
 void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
 {
-	struct iSeries_Device_Node *DevNode = PciDev->sysdata;
+	struct device_node *DevNode = PciDev->sysdata;
 	u16 bus;
 	u8 frame;
 	char card[4];
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
new file mode 100644
index 0000000..37b7341
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_PPC_PMAC)		+= pmac_pic.o pmac_setup.o pmac_time.o \
+				   pmac_feature.o pmac_pci.o pmac_sleep.o \
+				   pmac_low_i2c.o pmac_cache.o
+obj-$(CONFIG_PMAC_BACKLIGHT)	+= pmac_backlight.o
+obj-$(CONFIG_CPU_FREQ_PMAC)	+= pmac_cpufreq.o
+ifeq ($(CONFIG_PPC_PMAC),y)
+obj-$(CONFIG_NVRAM)		+= pmac_nvram.o
+obj-$(CONFIG_SMP)		+= pmac_smp.o
+endif
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644
index 0000000..40e1c50
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -0,0 +1,31 @@
+#ifndef __PMAC_H__
+#define __PMAC_H__
+
+#include <linux/pci.h>
+#include <linux/ide.h>
+
+/*
+ * Declaration for the various functions exported by the
+ * pmac_* files. Mostly for use by pmac_setup
+ */
+
+extern void pmac_get_boot_time(struct rtc_time *tm);
+extern void pmac_get_rtc_time(struct rtc_time *tm);
+extern int  pmac_set_rtc_time(struct rtc_time *tm);
+extern void pmac_read_rtc_time(void);
+extern void pmac_calibrate_decr(void);
+
+extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_init(void);
+extern void pmac_setup_pci_dma(void);
+extern void pmac_check_ht_link(void);
+
+extern void pmac_setup_smp(void);
+
+extern unsigned long pmac_ide_get_base(int index);
+extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
+	unsigned long data_port, unsigned long ctrl_port, int *irq);
+
+extern void pmac_nvram_init(void);
+
+#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/pmac_backlight.c b/arch/powerpc/platforms/powermac/pmac_backlight.c
new file mode 100644
index 0000000..8be2f7d
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_backlight.c
@@ -0,0 +1,202 @@
+/*
+ * Miscellaneous procedures for dealing with the PowerMac hardware.
+ * Contains support for the backlight.
+ *
+ *   Copyright (C) 2000 Benjamin Herrenschmidt
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/reboot.h>
+#include <linux/nvram.h>
+#include <linux/console.h>
+#include <asm/sections.h>
+#include <asm/ptrace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+#include <asm/backlight.h>
+
+#include <linux/adb.h>
+#include <linux/pmu.h>
+
+static struct backlight_controller *backlighter;
+static void* backlighter_data;
+static int backlight_autosave;
+static int backlight_level = BACKLIGHT_MAX;
+static int backlight_enabled = 1;
+static int backlight_req_level = -1;
+static int backlight_req_enable = -1;
+
+static void backlight_callback(void *);
+static DECLARE_WORK(backlight_work, backlight_callback, NULL);
+
+void register_backlight_controller(struct backlight_controller *ctrler,
+					  void *data, char *type)
+{
+	struct device_node* bk_node;
+	char *prop;
+	int valid = 0;
+
+	/* There's already a matching controller, bail out */
+	if (backlighter != NULL)
+		return;
+
+	bk_node = find_devices("backlight");
+
+#ifdef CONFIG_ADB_PMU
+	/* Special case for the old PowerBook since I can't test on it */
+	backlight_autosave = machine_is_compatible("AAPL,3400/2400")
+		|| machine_is_compatible("AAPL,3500");
+	if ((backlight_autosave
+	     || machine_is_compatible("AAPL,PowerBook1998")
+	     || machine_is_compatible("PowerBook1,1"))
+	    && !strcmp(type, "pmu"))
+		valid = 1;
+#endif
+	if (bk_node) {
+		prop = get_property(bk_node, "backlight-control", NULL);
+		if (prop && !strncmp(prop, type, strlen(type)))
+			valid = 1;
+	}
+	if (!valid)
+		return;
+	backlighter = ctrler;
+	backlighter_data = data;
+
+	if (bk_node && !backlight_autosave)
+		prop = get_property(bk_node, "bklt", NULL);
+	else
+		prop = NULL;
+	if (prop) {
+		backlight_level = ((*prop)+1) >> 1;
+		if (backlight_level > BACKLIGHT_MAX)
+			backlight_level = BACKLIGHT_MAX;
+	}
+
+#ifdef CONFIG_ADB_PMU
+	if (backlight_autosave) {
+		struct adb_request req;
+		pmu_request(&req, NULL, 2, 0xd9, 0);
+		while (!req.complete)
+			pmu_poll();
+		backlight_level = req.reply[0] >> 4;
+	}
+#endif
+	acquire_console_sem();
+	if (!backlighter->set_enable(1, backlight_level, data))
+		backlight_enabled = 1;
+	release_console_sem();
+
+	printk(KERN_INFO "Registered \"%s\" backlight controller,"
+	       "level: %d/15\n", type, backlight_level);
+}
+EXPORT_SYMBOL(register_backlight_controller);
+
+void unregister_backlight_controller(struct backlight_controller
+					    *ctrler, void *data)
+{
+	/* We keep the current backlight level (for now) */
+	if (ctrler == backlighter && data == backlighter_data)
+		backlighter = NULL;
+}
+EXPORT_SYMBOL(unregister_backlight_controller);
+
+static int __set_backlight_enable(int enable)
+{
+	int rc;
+
+	if (!backlighter)
+		return -ENODEV;
+	acquire_console_sem();
+	rc = backlighter->set_enable(enable, backlight_level,
+				     backlighter_data);
+	if (!rc)
+		backlight_enabled = enable;
+	release_console_sem();
+	return rc;
+}
+int set_backlight_enable(int enable)
+{
+	if (!backlighter)
+		return -ENODEV;
+	backlight_req_enable = enable;
+	schedule_work(&backlight_work);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_backlight_enable);
+
+int get_backlight_enable(void)
+{
+	if (!backlighter)
+		return -ENODEV;
+	return backlight_enabled;
+}
+EXPORT_SYMBOL(get_backlight_enable);
+
+static int __set_backlight_level(int level)
+{
+	int rc = 0;
+
+	if (!backlighter)
+		return -ENODEV;
+	if (level < BACKLIGHT_MIN)
+		level = BACKLIGHT_OFF;
+	if (level > BACKLIGHT_MAX)
+		level = BACKLIGHT_MAX;
+	acquire_console_sem();
+	if (backlight_enabled)
+		rc = backlighter->set_level(level, backlighter_data);
+	if (!rc)
+		backlight_level = level;
+	release_console_sem();
+	if (!rc && !backlight_autosave) {
+		level <<= 1;
+		if (level & 0x10)
+			level |= 0x01;
+		/* TODO: save to property "bklt" */
+	}
+	return rc;
+}
+int set_backlight_level(int level)
+{
+	if (!backlighter)
+		return -ENODEV;
+	backlight_req_level = level;
+	schedule_work(&backlight_work);
+	return 0;
+}
+
+EXPORT_SYMBOL(set_backlight_level);
+
+int get_backlight_level(void)
+{
+	if (!backlighter)
+		return -ENODEV;
+	return backlight_level;
+}
+EXPORT_SYMBOL(get_backlight_level);
+
+static void backlight_callback(void *dummy)
+{
+	int level, enable;
+
+	do {
+		level = backlight_req_level;
+		enable = backlight_req_enable;
+		mb();
+
+		if (level >= 0)
+			__set_backlight_level(level);
+		if (enable >= 0)
+			__set_backlight_enable(enable);
+	} while(cmpxchg(&backlight_req_level, level, -1) != level ||
+		cmpxchg(&backlight_req_enable, enable, -1) != enable);
+}
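backlight_callback() above implements a lock-free "latest request wins"
handoff: requesters simply overwrite a single slot and schedule the work, and
the worker consumes the slot, using cmpxchg to reset it to -1 only if no newer
request landed meanwhile (otherwise it loops). A user-space sketch of the
pattern, illustration only, with __sync_val_compare_and_swap standing in for
the kernel's cmpxchg():

    /* Standalone sketch of the backlight_callback() retry loop; not kernel code. */
    #include <stdio.h>

    static int req_level = -1;	/* -1 means no request pending */

    static void apply_level(int level)
    {
        printf("applying level %d\n", level);
    }

    static void worker(void)
    {
        int level;

        do {
            level = req_level;
            __sync_synchronize();	/* the mb() in the kernel code */

            if (level >= 0)
                apply_level(level);

            /* Reset the slot to -1, but only if it still holds the request
             * we just handled; otherwise loop and handle the newer one. */
        } while (__sync_val_compare_and_swap(&req_level, level, -1) != level);
    }

    int main(void)
    {
        req_level = 7;	/* a requester posts a level */
        worker();	/* the scheduled work consumes it */
        return 0;
    }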
diff --git a/arch/powerpc/platforms/powermac/pmac_cache.S b/arch/powerpc/platforms/powermac/pmac_cache.S
new file mode 100644
index 0000000..fb977de
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_cache.S
@@ -0,0 +1,359 @@
+/*
+ * This file contains low-level cache management functions
+ * used for sleep and CPU speed changes on Apple machines.
+ * (In fact the only thing that is Apple-specific is that we assume
+ * that we can read from ROM at physical address 0xfff00000.)
+ *
+ *    Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
+ *                       Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+
+/*
+ * Flush and disable all data caches (dL1, L2, L3). This is used
+ * when going to sleep, when doing a PMU based cpufreq transition,
+ * or when "offlining" a CPU on SMP machines. This code is over
+ * paranoid, but I've had enough issues with various CPU revs and
+ * bugs that I decided it was worth beeing over cautious
+ */
+
+_GLOBAL(flush_disable_caches)
+#ifndef CONFIG_6xx
+	blr
+#else
+BEGIN_FTR_SECTION
+	b	flush_disable_745x
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+BEGIN_FTR_SECTION
+	b	flush_disable_75x
+END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
+	b	__flush_disable_L1
+
+/* This is the code for G3 and 74[01]0 */
+flush_disable_75x:
+	mflr	r10
+
+	/* Turn off EE and DR in MSR */
+	mfmsr	r11
+	rlwinm	r0,r11,0,~MSR_EE
+	rlwinm	r0,r0,0,~MSR_DR
+	sync
+	mtmsr	r0
+	isync
+
+	/* Stop DST streams */
+BEGIN_FTR_SECTION
+	DSSALL
+	sync
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+
+	/* Stop DPM */
+	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
+	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
+	sync
+	mtspr	SPRN_HID0,r4		/* Disable DPM */
+	sync
+
+	/* Disp-flush L1. We have a weird problem here that I never
+	 * totally figured out. On 750FX, using the ROM for the flush
+	 * results in a non-working flush. We use that workaround for
+	 * now until I finally understand what's going on. --BenH
+	 */
+
+	/* ROM base by default */
+	lis	r4,0xfff0
+	mfpvr	r3
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,0x7000
+	bne+	1f
+	/* RAM base on 750FX */
+	li	r4,0
+1:	li	r4,0x4000
+	mtctr	r4
+1:	lwz	r0,0(r4)
+	addi	r4,r4,32
+	bdnz	1b
+	sync
+	isync
+
+	/* Disable / invalidate / enable L1 data */
+	mfspr	r3,SPRN_HID0
+	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
+	mtspr	SPRN_HID0,r3
+	sync
+	isync
+	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
+	sync
+	isync
+	mtspr	SPRN_HID0,r3
+	xori	r3,r3,(HID0_DCI|HID0_ICFI)
+	mtspr	SPRN_HID0,r3
+	sync
+
+	/* Get the current enable bit of the L2CR into r4 */
+	mfspr	r5,SPRN_L2CR
+	/* Set to data-only (pre-745x bit) */
+	oris	r3,r5,L2CR_L2DO@h
+	b	2f
+	/* When disabling L2, code must be in L1 */
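+	/* The branch chain below (b 2f -> b 3f -> sync; isync -> b 1b)
+	 * issues a sync/isync first, then branches back into the aligned
+	 * line holding the mtspr, so the store that turns the cache off
+	 * runs from code already resident in the I-cache.
+	 */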
+	.balign 32
+1:	mtspr	SPRN_L2CR,r3
+3:	sync
+	isync
+	b	1f
+2:	b	3f
+3:	sync
+	isync
+	b	1b
+1:	/* disp-flush L2. The interesting thing here is that the L2 can be
+	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
+	 * but that is probably fine. We disp-flush over 4Mb to be safe
+	 */
+	lis	r4,2
+	mtctr	r4
+	lis	r4,0xfff0
+1:	lwz	r0,0(r4)
+	addi	r4,r4,32
+	bdnz	1b
+	sync
+	isync
+	lis	r4,2
+	mtctr	r4
+	lis	r4,0xfff0
+1:	dcbf	0,r4
+	addi	r4,r4,32
+	bdnz	1b
+	sync
+	isync
+
+	/* now disable L2 */
+	rlwinm	r5,r5,0,~L2CR_L2E
+	b	2f
+	/* When disabling L2, code must be in L1 */
+	.balign 32
+1:	mtspr	SPRN_L2CR,r5
+3:	sync
+	isync
+	b	1f
+2:	b	3f
+3:	sync
+	isync
+	b	1b
+1:	sync
+	isync
+	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
+	oris	r4,r5,L2CR_L2I@h
+	mtspr	SPRN_L2CR,r4
+	sync
+	isync
+
+	/* Wait for the invalidation to complete */
+1:	mfspr	r3,SPRN_L2CR
+	rlwinm.	r0,r3,0,31,31
+	bne	1b
+
+	/* Clear L2I */
+	xoris	r4,r4,L2CR_L2I@h
+	sync
+	mtspr	SPRN_L2CR,r4
+	sync
+
+	/* now disable the L1 data cache */
+	mfspr	r0,SPRN_HID0
+	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
+	mtspr	SPRN_HID0,r0
+	sync
+	isync
+
+	/* Restore HID0[DPM] to whatever it was before */
+	sync
+	mfspr	r0,SPRN_HID0
+	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
+	mtspr	SPRN_HID0,r0
+	sync
+
+	/* restore DR and EE */
+	sync
+	mtmsr	r11
+	isync
+
+	mtlr	r10
+	blr
+
+/* This code is for 745x processors */
+flush_disable_745x:
+	/* Turn off EE and DR in MSR */
+	mfmsr	r11
+	rlwinm	r0,r11,0,~MSR_EE
+	rlwinm	r0,r0,0,~MSR_DR
+	sync
+	mtmsr	r0
+	isync
+
+	/* Stop prefetch streams */
+	DSSALL
+	sync
+
+	/* Disable L2 prefetching */
+	mfspr	r0,SPRN_MSSCR0
+	rlwinm	r0,r0,0,0,29
+	mtspr	SPRN_MSSCR0,r0
+	sync
+	isync
+	lis	r4,0
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+	dcbf	0,r4
+
+	/* Due to a bug with the HW flush on some CPU revs, we occasionally
+	 * experience data corruption. I'm adding a displacement flush along
+	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
+	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
+	 * it even with a big hammer...
+	 */
+
+	lis	r4,0x0002
+	mtctr	r4
+	li	r4,0
+1:
+	lwz	r0,0(r4)
+	addi	r4,r4,32		/* Go to start of next cache line */
+	bdnz	1b
+	isync
+
+	/* Now, flush the first 4MB of memory */
+	lis	r4,0x0002
+	mtctr	r4
+	li	r4,0
+	sync
+1:
+	dcbf	0,r4
+	addi	r4,r4,32		/* Go to start of next cache line */
+	bdnz	1b
+
+	/* Flush and disable the L1 data cache */
+	mfspr	r6,SPRN_LDSTCR
+	lis	r3,0xfff0	/* read from ROM for displacement flush */
+	li	r4,0xfe		/* start with only way 0 unlocked */
+	li	r5,128		/* 128 lines in each way */
+1:	mtctr	r5
+	rlwimi	r6,r4,0,24,31
+	mtspr	SPRN_LDSTCR,r6
+	sync
+	isync
+2:	lwz	r0,0(r3)	/* touch each cache line */
+	addi	r3,r3,32
+	bdnz	2b
+	rlwinm	r4,r4,1,24,30	/* move on to the next way */
+	ori	r4,r4,1
+	cmpwi	r4,0xff		/* all done? */
+	bne	1b
+	/* now unlock the L1 data cache */
+	li	r4,0
+	rlwimi	r6,r4,0,24,31
+	sync
+	mtspr	SPRN_LDSTCR,r6
+	sync
+	isync
+
+	/* Flush the L2 cache using the hardware assist */
+	mfspr	r3,SPRN_L2CR
+	cmpwi	r3,0		/* check if it is enabled first */
+	bge	4f
+	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
+	b	2f
+	/* When disabling/locking L2, code must be in L1 */
+	.balign 32
+1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
+3:	sync
+	isync
+	b	1f
+2:	b	3f
+3:	sync
+	isync
+	b	1b
+1:	sync
+	isync
+	ori	r0,r3,L2CR_L2HWF_745x
+	sync
+	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
+3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
+	andi.	r0,r0,L2CR_L2HWF_745x
+	bne	3b
+	sync
+	rlwinm	r3,r3,0,~L2CR_L2E
+	b	2f
+	/* When disabling L2, code must be in L1 */
+	.balign 32
+1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
+3:	sync
+	isync
+	b	1f
+2:	b	3f
+3:	sync
+	isync
+	b	1b
+1:	sync
+	isync
+	oris	r4,r3,L2CR_L2I@h
+	mtspr	SPRN_L2CR,r4
+	sync
+	isync
+1:	mfspr	r4,SPRN_L2CR
+	andis.	r0,r4,L2CR_L2I@h
+	bne	1b
+	sync
+
+BEGIN_FTR_SECTION
+	/* Flush the L3 cache using the hardware assist */
+4:	mfspr	r3,SPRN_L3CR
+	cmpwi	r3,0		/* check if it is enabled */
+	bge	6f
+	oris	r0,r3,L3CR_L3IO@h
+	ori	r0,r0,L3CR_L3DO
+	sync
+	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
+	sync
+	isync
+	ori	r0,r0,L3CR_L3HWF
+	sync
+	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
+5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
+	andi.	r0,r0,L3CR_L3HWF
+	bne	5b
+	rlwinm	r3,r3,0,~L3CR_L3E
+	sync
+	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
+	sync
+	ori	r4,r3,L3CR_L3I
+	mtspr	SPRN_L3CR,r4
+1:	mfspr	r4,SPRN_L3CR
+	andi.	r0,r4,L3CR_L3I
+	bne	1b
+	sync
+END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
+
+6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
+	rlwinm	r0,r0,0,~HID0_DCE
+	mtspr	SPRN_HID0,r0
+	sync
+	isync
+	mtmsr	r11		/* restore DR and EE */
+	isync
+	blr
+#endif	/* CONFIG_6xx */
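Both flush paths above lean on displacement flushing: load one word from every
32-byte cache line across a region larger than the cache, so that every line
the cache held is displaced (and written back if dirty), without relying on a
working hardware flush. A C sketch of the same loop, illustration only (the
32-byte line size and 4MB span mirror the constants in the assembly; a real
flush must of course run with translation and interrupts controlled, as the
assembly does):

    /* Standalone sketch of a displacement flush; not kernel code. */
    #include <stdlib.h>

    #define LINE_SIZE	32
    #define FLUSH_SPAN	(4 * 1024 * 1024)

    static void displacement_flush(const volatile char *base)
    {
        volatile char sink;
        unsigned long off;

        /* Touch one word per cache line; each load displaces a line. */
        for (off = 0; off < FLUSH_SPAN; off += LINE_SIZE)
            sink = base[off];
        (void)sink;
    }

    int main(void)
    {
        char *buf = malloc(FLUSH_SPAN);

        if (buf)
            displacement_flush(buf);
        free(buf);
        return 0;
    }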
diff --git a/arch/powerpc/platforms/powermac/pmac_cpufreq.c b/arch/powerpc/platforms/powermac/pmac_cpufreq.c
new file mode 100644
index 0000000..6d32d99
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_cpufreq.c
@@ -0,0 +1,728 @@
+/*
+ *  arch/powerpc/platforms/powermac/pmac_cpufreq.c
+ *
+ *  Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *  Copyright (C) 2004        John Steele Scott <toojays@toojays.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different type of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/i2c.h>
+#include <linux/hardirq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/system.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+
+/* WARNING! Enabling DEBUG_FREQ will cause calibrate_delay() to be
+ * called, but that is an __init function! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ.
+ */
+#undef DEBUG_FREQ
+
+/*
+ * There is a problem with the core cpufreq code on SMP kernels:
+ * it won't recalculate the BogoMIPS properly.
+ */
+#ifdef CONFIG_SMP
+#warning "WARNING, CPUFREQ not recommended on SMP kernels"
+#endif
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+
+/*
+ * Different models use different mechanisms to switch the frequency.
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define CPUFREQ_HIGH                  0
+#define CPUFREQ_LOW                   1
+
+static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
+	{CPUFREQ_HIGH, 		0},
+	{CPUFREQ_LOW,		0},
+	{0,			CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr* pmac_cpu_freqs_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static inline void local_delay(unsigned long ms)
+{
+	if (no_schedule)
+		mdelay(ms);
+	else
+		msleep(ms);
+}
+
+static inline void wakeup_decrementer(void)
+{
+	set_dec(tb_ticks_per_jiffy);
+	/* No currently-supported powerbook has a 601,
+	 * so use get_tbl, not native
+	 */
+	last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
+}
+
+#ifdef DEBUG_FREQ
+static inline void debug_calc_bogomips(void)
+{
+	/* This will cause a recalc of bogomips and display the
+	 * result. We backup/restore the value to avoid affecting the
+	 * core cpufreq framework's own calculation.
+	 */
+	extern void calibrate_delay(void);
+
+	unsigned long save_lpj = loops_per_jiffy;
+	calibrate_delay();
+	loops_per_jiffy = save_lpj;
+}
+#endif /* DEBUG_FREQ */
+
+/* Switch CPU speed under 750FX CPU control
+ */
+static int cpu_750fx_cpu_speed(int low_speed)
+{
+	u32 hid2;
+
+	if (low_speed == 0) {
+		/* ramping up, set voltage first */
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+		/* Make sure we sleep for at least 1ms */
+		local_delay(10);
+
+		/* tweak L2 for high voltage */
+		if (has_cpu_l2lve) {
+			hid2 = mfspr(SPRN_HID2);
+			hid2 &= ~0x2000;
+			mtspr(SPRN_HID2, hid2);
+		}
+	}
+#ifdef CONFIG_6xx
+	low_choose_750fx_pll(low_speed);
+#endif
+	if (low_speed == 1) {
+		/* tweak L2 for low voltage */
+		if (has_cpu_l2lve) {
+			hid2 = mfspr(SPRN_HID2);
+			hid2 |= 0x2000;
+			mtspr(SPRN_HID2, hid2);
+		}
+
+		/* ramping down, set voltage last */
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+		local_delay(10);
+	}
+
+	return 0;
+}
+
+static unsigned int cpu_750fx_get_cpu_speed(void)
+{
+	if (mfspr(SPRN_HID1) & HID1_PS)
+		return low_freq;
+	else
+		return hi_freq;
+}
+
+/* Switch CPU speed using DFS */
+static int dfs_set_cpu_speed(int low_speed)
+{
+	if (low_speed == 0) {
+		/* ramping up, set voltage first */
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+		/* Make sure we sleep for at least 1ms */
+		local_delay(1);
+	}
+
+	/* set frequency */
+#ifdef CONFIG_6xx
+	low_choose_7447a_dfs(low_speed);
+#endif
+	udelay(100);
+
+	if (low_speed == 1) {
+		/* ramping down, set voltage last */
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+		local_delay(1);
+	}
+
+	return 0;
+}
+
+static unsigned int dfs_get_cpu_speed(void)
+{
+	if (mfspr(SPRN_HID1) & HID1_DFS)
+		return low_freq;
+	else
+		return hi_freq;
+}
+
+
+/* Switch CPU speed using slewing GPIOs
+ */
+static int gpios_set_cpu_speed(int low_speed)
+{
+	int gpio, timeout = 0;
+
+	/* If ramping up, set voltage first */
+	if (low_speed == 0) {
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+		/* Delay is way too big but it's ok, we schedule */
+		local_delay(10);
+	}
+
+	/* Set frequency */
+	gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+	if (low_speed == ((gpio & 0x01) == 0))
+		goto skip;
+
+	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
+			  low_speed ? 0x04 : 0x05);
+	udelay(200);
+	do {
+		if (++timeout > 100)
+			break;
+		local_delay(1);
+		gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+	} while((gpio & 0x02) == 0);
+ skip:
+	/* If ramping down, set voltage last */
+	if (low_speed == 1) {
+		pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+		/* Delay is way too big but it's ok, we schedule */
+		local_delay(10);
+	}
+
+#ifdef DEBUG_FREQ
+	debug_calc_bogomips();
+#endif
+
+	return 0;
+}
+
+/* Switch CPU speed under PMU control
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+	struct adb_request req;
+	unsigned long save_l2cr;
+	unsigned long save_l3cr;
+	unsigned int pic_prio;
+	unsigned long flags;
+
+	preempt_disable();
+
+#ifdef DEBUG_FREQ
+	printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+	pmu_suspend();
+
+	/* Disable all interrupt sources on openpic */
+	pic_prio = mpic_cpu_get_priority();
+	mpic_cpu_set_priority(0xf);
+
+	/* Make sure the decrementer won't interrupt us */
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+	/* Make sure any pending DEC interrupt occurring while we did
+	 * the above didn't re-enable the DEC */
+	mb();
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+	/* We can now disable MSR_EE */
+	local_irq_save(flags);
+
+	/* Give up the FPU & vec */
+	enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+	/* Save & disable L2 and L3 caches */
+	save_l3cr = _get_L3CR();	/* (returns -1 if not available) */
+	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */
+
+	/* Send the new speed command. My assumption is that this command
+	 * will cause PLL_CFG[0..3] to be changed the next time the CPU
+	 * goes to sleep.
+	 */
+	pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+	while (!req.complete)
+		pmu_poll();
+
+	/* Prepare the northbridge for the speed transition */
+	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 1);
+
+	/* Call low level code to backup CPU state and recover from
+	 * hardware reset
+	 */
+	low_sleep_handler();
+
+	/* Restore the northbridge */
+	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 0);
+
+	/* Restore L2 cache */
+	if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+		_set_L2CR(save_l2cr);
+	/* Restore L3 cache */
+	if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+		_set_L3CR(save_l3cr);
+
+	/* Restore userland MMU context */
+	set_context(current->active_mm->context, current->active_mm->pgd);
+
+#ifdef DEBUG_FREQ
+	printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+	/* Restore low level PMU operations */
+	pmu_unlock();
+
+	/* Restore decrementer */
+	wakeup_decrementer();
+
+	/* Restore interrupts */
+	mpic_cpu_set_priority(pic_prio);
+
+	/* Let interrupts flow again ... */
+	local_irq_restore(flags);
+
+#ifdef DEBUG_FREQ
+	debug_calc_bogomips();
+#endif
+
+	pmu_resume();
+
+	preempt_enable();
+
+	return 0;
+}
+
+static int do_set_cpu_speed(int speed_mode, int notify)
+{
+	struct cpufreq_freqs freqs;
+	unsigned long l3cr;
+	static unsigned long prev_l3cr;
+
+	freqs.old = cur_freq;
+	freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+	freqs.cpu = smp_processor_id();
+
+	if (freqs.old == freqs.new)
+		return 0;
+
+	if (notify)
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	if (speed_mode == CPUFREQ_LOW &&
+	    cpu_has_feature(CPU_FTR_L3CR)) {
+		l3cr = _get_L3CR();
+		if (l3cr & L3CR_L3E) {
+			prev_l3cr = l3cr;
+			_set_L3CR(0);
+		}
+	}
+	set_speed_proc(speed_mode == CPUFREQ_LOW);
+	if (speed_mode == CPUFREQ_HIGH &&
+	    cpu_has_feature(CPU_FTR_L3CR)) {
+		l3cr = _get_L3CR();
+		if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
+			_set_L3CR(prev_l3cr);
+	}
+	if (notify)
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+	return 0;
+}
+
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
+{
+	return cur_freq;
+}
+
+static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
+}
+
+static int pmac_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	unsigned int    newstate = 0;
+
+	if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
+			target_freq, relation, &newstate))
+		return -EINVAL;
+
+	return do_set_cpu_speed(newstate, 1);
+}
+
+unsigned int pmac_get_one_cpufreq(int i)
+{
+	/* Supports only one CPU for now */
+	return (i == 0) ? cur_freq : 0;
+}
+
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	if (policy->cpu != 0)
+		return -ENODEV;
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	policy->cpuinfo.transition_latency	= CPUFREQ_ETERNAL;
+	policy->cur = cur_freq;
+
+	cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
+	return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+}
+
+static u32 read_gpio(struct device_node *np)
+{
+	u32 *reg = (u32 *)get_property(np, "reg", NULL);
+	u32 offset;
+
+	if (reg == NULL)
+		return 0;
+	/* That works for all keylargos but shall be fixed properly
+	 * some day... The problem is that it seems we can't rely
+	 * on the "reg" property of the GPIO nodes, they are either
+	 * relative to the base of KeyLargo or to the base of the
+	 * GPIO space, and the device-tree doesn't help.
+	 */
+	offset = *reg;
+	if (offset < KEYLARGO_GPIO_LEVELS0)
+		offset += KEYLARGO_GPIO_LEVELS0;
+	return offset;
+}
+
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+{
+	/* Ok, this could be made a bit smarter, but let's be robust for now. We
+	 * always force a speed change to high speed before sleep, to make sure
+	 * we have appropriate voltage and/or bus speed for the wakeup process,
+	 * and to make sure our loops_per_jiffy is "good enough", that is, it
+	 * will not cause too-short delays if we sleep at low speed and wake
+	 * at high speed.
+	 */
+	no_schedule = 1;
+	sleep_freq = cur_freq;
+	if (cur_freq == low_freq && !is_pmu_based)
+		do_set_cpu_speed(CPUFREQ_HIGH, 0);
+	return 0;
+}
+
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	/* If we resume, first check if we have a get() function */
+	if (get_speed_proc)
+		cur_freq = get_speed_proc();
+	else
+		cur_freq = 0;
+
+	/* If we don't, we don't really know our speed here; the best we
+	 * can do is force a switch back to whatever it was, which is
+	 * probably high speed due to our suspend() routine.
+	 */
+	do_set_cpu_speed(sleep_freq == low_freq ?
+			 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+
+	no_schedule = 0;
+	return 0;
+}
+
+static struct cpufreq_driver pmac_cpufreq_driver = {
+	.verify 	= pmac_cpufreq_verify,
+	.target 	= pmac_cpufreq_target,
+	.get		= pmac_cpufreq_get_speed,
+	.init		= pmac_cpufreq_cpu_init,
+	.suspend	= pmac_cpufreq_suspend,
+	.resume		= pmac_cpufreq_resume,
+	.flags		= CPUFREQ_PM_NO_WARN,
+	.attr		= pmac_cpu_freqs_attr,
+	.name		= "powermac",
+	.owner		= THIS_MODULE,
+};
+
+
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+	struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+								"voltage-gpio");
+	struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+								"frequency-gpio");
+	struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+								     "slewing-done");
+	u32 *value;
+
+	/*
+	 * Check to see if it's GPIO driven or PMU only
+	 *
+	 * The way we extract the GPIO address is slightly hackish, but it
+	 * works well enough for now. We need to abstract the whole GPIO
+	 * stuff sooner or later anyway
+	 */
+
+	if (volt_gpio_np)
+		voltage_gpio = read_gpio(volt_gpio_np);
+	if (freq_gpio_np)
+		frequency_gpio = read_gpio(freq_gpio_np);
+	if (slew_done_gpio_np)
+		slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+	/* If we use the frequency GPIOs, calculate the min/max speeds based
+	 * on the bus frequencies
+	 */
+	if (frequency_gpio && slew_done_gpio) {
+		int lenp, rc;
+		u32 *freqs, *ratio;
+
+		freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
+		lenp /= sizeof(u32);
+		if (freqs == NULL || lenp != 2) {
+			printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+			return 1;
+		}
+		ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
+		if (ratio == NULL) {
+			printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+			return 1;
+		}
+
+		/* Get the min/max bus frequencies */
+		low_freq = min(freqs[0], freqs[1]);
+		hi_freq = max(freqs[0], freqs[1]);
+
+		/* Grrrr.. It _seems_ that the device-tree is lying about the low
+		 * bus frequency: it claims it to be around 84MHz on some models
+		 * while it appears to be approx. 101MHz on all. Let's hack around
+		 * it here... fortunately, we don't need to be too precise.
+		 */
+		if (low_freq < 98000000)
+			low_freq = 101000000;
+
+		/* Convert those to CPU core clocks */
+		low_freq = (low_freq * (*ratio)) / 2000;
+		hi_freq = (hi_freq * (*ratio)) / 2000;
+
+		/* Now that we have the frequencies, we read the GPIO to see
+		 * what our current speed is.
+		 */
+		rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+		cur_freq = (rc & 0x01) ? hi_freq : low_freq;
+
+		set_speed_proc = gpios_set_cpu_speed;
+		return 1;
+	}
+
+	/* If we use the PMU, look for the min & max frequencies in the
+	 * device-tree
+	 */
+	value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
+	if (!value)
+		return 1;
+	low_freq = (*value) / 1000;
+	/* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
+	 * here */
+	if (low_freq < 100000)
+		low_freq *= 10;
+
+	value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
+	if (!value)
+		return 1;
+	hi_freq = (*value) / 1000;
+	set_speed_proc = pmu_set_cpu_speed;
+	is_pmu_based = 1;
+
+	return 0;
+}
+
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+{
+	struct device_node *volt_gpio_np;
+
+	if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+		return 1;
+
+	volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+	if (volt_gpio_np)
+		voltage_gpio = read_gpio(volt_gpio_np);
+	if (!voltage_gpio) {
+		printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+		return 1;
+	}
+
+	/* OF only reports the high frequency */
+	hi_freq = cur_freq;
+	low_freq = cur_freq/2;
+
+	/* Read actual frequency from CPU */
+	cur_freq = dfs_get_cpu_speed();
+	set_speed_proc = dfs_set_cpu_speed;
+	get_speed_proc = dfs_get_cpu_speed;
+
+	return 0;
+}
+
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+{
+	struct device_node *volt_gpio_np;
+	u32 pvr, *value;
+
+	if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+		return 1;
+
+	hi_freq = cur_freq;
+	value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
+	if (!value)
+		return 1;
+	low_freq = (*value) / 1000;
+
+	volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+	if (volt_gpio_np)
+		voltage_gpio = read_gpio(volt_gpio_np);
+
+	pvr = mfspr(SPRN_PVR);
+	has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+	set_speed_proc = cpu_750fx_cpu_speed;
+	get_speed_proc = cpu_750fx_get_cpu_speed;
+	cur_freq = cpu_750fx_get_cpu_speed();
+
+	return 0;
+}
+
+/* Currently, we support the following machines:
+ *
+ *  - Titanium PowerBook 1GHz (PMU based, 667MHz & 1GHz)
+ *  - Titanium PowerBook 800 (PMU based, 667MHz & 800MHz)
+ *  - Titanium PowerBook 400 (PMU based, 300MHz & 400MHz)
+ *  - Titanium PowerBook 500 (PMU based, 300MHz & 500MHz)
+ *  - iBook2 500/600 (PMU based, 400MHz & 500/600MHz)
+ *  - iBook2 700 (CPU based, 400MHz & 700MHz, supports low voltage)
+ *  - Recent MacRISC3 laptops
+ *  - All new machines with 7447A CPUs
+ */
+static int __init pmac_cpufreq_setup(void)
+{
+	struct device_node	*cpunode;
+	u32			*value;
+
+	if (strstr(cmd_line, "nocpufreq"))
+		return 0;
+
+	/* Assume only one CPU */
+	cpunode = find_type_devices("cpu");
+	if (!cpunode)
+		goto out;
+
+	/* Get current cpu clock freq */
+	value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
+	if (!value)
+		goto out;
+	cur_freq = (*value) / 1000;
+
+	/*  Check for 7447A based MacRISC3 */
+	if (machine_is_compatible("MacRISC3") &&
+	    get_property(cpunode, "dynamic-power-step", NULL) &&
+	    PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
+		pmac_cpufreq_init_7447A(cpunode);
+	/* Check for other MacRISC3 machines */
+	} else if (machine_is_compatible("PowerBook3,4") ||
+		   machine_is_compatible("PowerBook3,5") ||
+		   machine_is_compatible("MacRISC3")) {
+		pmac_cpufreq_init_MacRISC3(cpunode);
+	/* Else check for iBook2 500/600 */
+	} else if (machine_is_compatible("PowerBook4,1")) {
+		hi_freq = cur_freq;
+		low_freq = 400000;
+		set_speed_proc = pmu_set_cpu_speed;
+		is_pmu_based = 1;
+	}
+	/* Else check for TiPb 400 & 500 */
+	else if (machine_is_compatible("PowerBook3,2")) {
+		/* We only know about the 400MHz and the 500MHz models;
+		 * they both have 300MHz as the low frequency.
+		 */
+		if (cur_freq < 350000 || cur_freq > 550000)
+			goto out;
+		hi_freq = cur_freq;
+		low_freq = 300000;
+		set_speed_proc = pmu_set_cpu_speed;
+		is_pmu_based = 1;
+	}
+	/* Else check for 750FX */
+	else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
+		pmac_cpufreq_init_750FX(cpunode);
+out:
+	if (set_speed_proc == NULL)
+		return -ENODEV;
+
+	pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
+	pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+
+	printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
+	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+	       low_freq/1000, hi_freq/1000, cur_freq/1000);
+
+	return cpufreq_register_driver(&pmac_cpufreq_driver);
+}
+
+module_init(pmac_cpufreq_setup);
+
diff --git a/arch/powerpc/platforms/powermac/pmac_feature.c b/arch/powerpc/platforms/powermac/pmac_feature.c
new file mode 100644
index 0000000..2cba670
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_feature.c
@@ -0,0 +1,3062 @@
+/*
+ *  arch/powerpc/platforms/powermac/pmac_feature.c
+ *
+ *  Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
+ *                          Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  TODO:
+ *
+ *   - Replace mdelay with some schedule loop if possible
+ *   - Shorten some obfuscated delays on some routines (like modem
+ *     power)
+ *   - Refcount some clocks (see darwin)
+ *   - Split split split...
+ *
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/sections.h>
+#include <asm/errno.h>
+#include <asm/ohare.h>
+#include <asm/heathrow.h>
+#include <asm/keylargo.h>
+#include <asm/uninorth.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/pci-bridge.h>
+#include <asm/pmac_low_i2c.h>
+
+#undef DEBUG_FEATURE
+
+#ifdef DEBUG_FEATURE
+#define DBG(fmt...) printk(KERN_DEBUG fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifdef CONFIG_6xx
+extern int powersave_lowspeed;
+#endif
+
+extern int powersave_nap;
+extern struct device_node *k2_skiplist[2];
+
+
+/*
+ * We use a single global lock to protect accesses. Each driver has
+ * to take care of its own locking
+ */
+static DEFINE_SPINLOCK(feature_lock);
+
+#define LOCK(flags)	spin_lock_irqsave(&feature_lock, flags)
+#define UNLOCK(flags)	spin_unlock_irqrestore(&feature_lock, flags)
+
+
+/*
+ * Instances of some macio stuff
+ */
+struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+struct macio_chip *macio_find(struct device_node *child, int type)
+{
+	while(child) {
+		int	i;
+
+		for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
+			if (child == macio_chips[i].of_node &&
+			    (!type || macio_chips[i].type == type))
+				return &macio_chips[i];
+		child = child->parent;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(macio_find);
+
+static const char *macio_names[] =
+{
+	"Unknown",
+	"Grand Central",
+	"OHare",
+	"OHareII",
+	"Heathrow",
+	"Gatwick",
+	"Paddington",
+	"Keylargo",
+	"Pangea",
+	"Intrepid",
+	"K2"
+};
+
+
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r)	(uninorth_base + ((r) >> 2))
+#define UN_IN(r)	(in_be32(UN_REG(r)))
+#define UN_OUT(r,v)	(out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v)	(UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v)	(UN_OUT((r), UN_IN(r) & ~(v)))
+
+static struct device_node *uninorth_node;
+static u32 __iomem *uninorth_base;
+static u32 uninorth_rev;
+static int uninorth_u3;
+static void __iomem *u3_ht;
+
+/*
+ * For each motherboard family, we have a table of functions pointers
+ * that handle the various features.
+ */
+
+typedef long (*feature_call)(struct device_node *node, long param, long value);
+
+struct feature_table_entry {
+	unsigned int	selector;
+	feature_call	function;
+};
+
+struct pmac_mb_def
+{
+	const char*			model_string;
+	const char*			model_name;
+	int				model_id;
+	struct feature_table_entry*	features;
+	unsigned long			board_flags;
+};
+static struct pmac_mb_def pmac_mb;
+
+/*
+ * Here are the chip specific feature functions
+ */
+
+static inline int simple_feature_tweak(struct device_node *node, int type,
+				       int reg, u32 mask, int value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+
+	macio = macio_find(node, type);
+	if (!macio)
+		return -ENODEV;
+	LOCK(flags);
+	if (value)
+		MACIO_BIS(reg, mask);
+	else
+		MACIO_BIC(reg, mask);
+	(void)MACIO_IN32(reg);
+	UNLOCK(flags);
+
+	return 0;
+}
+
+#ifndef CONFIG_POWER4
+
+static long ohare_htw_scc_enable(struct device_node *node, long param,
+				 long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		chan_mask;
+	unsigned long		fcr;
+	unsigned long		flags;
+	int			htw, trans;
+	unsigned long		rmask;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	if (!strcmp(node->name, "ch-a"))
+		chan_mask = MACIO_FLAG_SCCA_ON;
+	else if (!strcmp(node->name, "ch-b"))
+		chan_mask = MACIO_FLAG_SCCB_ON;
+	else
+		return -ENODEV;
+
+	htw = (macio->type == macio_heathrow || macio->type == macio_paddington
+		|| macio->type == macio_gatwick);
+	/* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
+	trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+		 pmac_mb.model_id != PMAC_TYPE_YIKES);
+	if (value) {
+#ifdef CONFIG_ADB_PMU
+		if ((param & 0xfff) == PMAC_SCC_IRDA)
+			pmu_enable_irled(1);
+#endif /* CONFIG_ADB_PMU */
+		LOCK(flags);
+		fcr = MACIO_IN32(OHARE_FCR);
+		/* Check if the scc cell needs enabling */
+		if (!(fcr & OH_SCC_ENABLE)) {
+			fcr |= OH_SCC_ENABLE;
+			if (htw) {
+				/* Side effect: this will also power up the
+				 * modem, but it's too messy to figure out on
+				 * which ports this controls the transceiver
+				 * and on which it controls the modem.
+				 */
+				if (trans)
+					fcr &= ~HRW_SCC_TRANS_EN_N;
+				MACIO_OUT32(OHARE_FCR, fcr);
+				fcr |= (rmask = HRW_RESET_SCC);
+				MACIO_OUT32(OHARE_FCR, fcr);
+			} else {
+				fcr |= (rmask = OH_SCC_RESET);
+				MACIO_OUT32(OHARE_FCR, fcr);
+			}
+			UNLOCK(flags);
+			(void)MACIO_IN32(OHARE_FCR);
+			mdelay(15);
+			LOCK(flags);
+			fcr &= ~rmask;
+			MACIO_OUT32(OHARE_FCR, fcr);
+		}
+		if (chan_mask & MACIO_FLAG_SCCA_ON)
+			fcr |= OH_SCCA_IO;
+		if (chan_mask & MACIO_FLAG_SCCB_ON)
+			fcr |= OH_SCCB_IO;
+		MACIO_OUT32(OHARE_FCR, fcr);
+		macio->flags |= chan_mask;
+		UNLOCK(flags);
+		if (param & PMAC_SCC_FLAG_XMON)
+			macio->flags |= MACIO_FLAG_SCC_LOCKED;
+	} else {
+		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+			return -EPERM;
+		LOCK(flags);
+		fcr = MACIO_IN32(OHARE_FCR);
+		if (chan_mask & MACIO_FLAG_SCCA_ON)
+			fcr &= ~OH_SCCA_IO;
+		if (chan_mask & MACIO_FLAG_SCCB_ON)
+			fcr &= ~OH_SCCB_IO;
+		MACIO_OUT32(OHARE_FCR, fcr);
+		if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
+			fcr &= ~OH_SCC_ENABLE;
+			if (htw && trans)
+				fcr |= HRW_SCC_TRANS_EN_N;
+			MACIO_OUT32(OHARE_FCR, fcr);
+		}
+		macio->flags &= ~(chan_mask);
+		UNLOCK(flags);
+		mdelay(10);
+#ifdef CONFIG_ADB_PMU
+		if ((param & 0xfff) == PMAC_SCC_IRDA)
+			pmu_enable_irled(0);
+#endif /* CONFIG_ADB_PMU */
+	}
+	return 0;
+}
+
+static long ohare_floppy_enable(struct device_node *node, long param,
+				long value)
+{
+	return simple_feature_tweak(node, macio_ohare,
+		OHARE_FCR, OH_FLOPPY_ENABLE, value);
+}
+
+static long ohare_mesh_enable(struct device_node *node, long param, long value)
+{
+	return simple_feature_tweak(node, macio_ohare,
+		OHARE_FCR, OH_MESH_ENABLE, value);
+}
+
+static long ohare_ide_enable(struct device_node *node, long param, long value)
+{
+	switch(param) {
+	case 0:
+		/* For some reason, setting the bit in set_initial_features()
+		 * doesn't stick. I'm still investigating... --BenH.
+		 */
+		if (value)
+			simple_feature_tweak(node, macio_ohare,
+				OHARE_FCR, OH_IOBUS_ENABLE, 1);
+		return simple_feature_tweak(node, macio_ohare,
+			OHARE_FCR, OH_IDE0_ENABLE, value);
+	case 1:
+		return simple_feature_tweak(node, macio_ohare,
+			OHARE_FCR, OH_BAY_IDE_ENABLE, value);
+	default:
+		return -ENODEV;
+	}
+}
+
+static long ohare_ide_reset(struct device_node *node, long param, long value)
+{
+	switch(param) {
+	case 0:
+		return simple_feature_tweak(node, macio_ohare,
+			OHARE_FCR, OH_IDE0_RESET_N, !value);
+	case 1:
+		return simple_feature_tweak(node, macio_ohare,
+			OHARE_FCR, OH_IDE1_RESET_N, !value);
+	default:
+		return -ENODEV;
+	}
+}
+
+static long ohare_sleep_state(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio = &macio_chips[0];
+
+	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+		return -EPERM;
+	if (value == 1) {
+		MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
+	} else if (value == 0) {
+		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+	}
+
+	return 0;
+}
+
+static long heathrow_modem_enable(struct device_node *node, long param,
+				  long value)
+{
+	struct macio_chip*	macio;
+	u8			gpio;
+	unsigned long		flags;
+
+	macio = macio_find(node, macio_unknown);
+	if (!macio)
+		return -ENODEV;
+	gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
+	if (!value) {
+		LOCK(flags);
+		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+		UNLOCK(flags);
+		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+		mdelay(250);
+	}
+	if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
+	    pmac_mb.model_id != PMAC_TYPE_YIKES) {
+		LOCK(flags);
+		if (value)
+			MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+		else
+			MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+		UNLOCK(flags);
+		(void)MACIO_IN32(HEATHROW_FCR);
+		mdelay(250);
+	}
+	if (value) {
+		LOCK(flags);
+		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
+		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
+		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250);
+	}
+	return 0;
+}
+
+static long heathrow_floppy_enable(struct device_node *node, long param,
+				   long value)
+{
+	return simple_feature_tweak(node, macio_unknown,
+		HEATHROW_FCR,
+		HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE,
+		value);
+}
+
+static long heathrow_mesh_enable(struct device_node *node, long param,
+				 long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+
+	macio = macio_find(node, macio_unknown);
+	if (!macio)
+		return -ENODEV;
+	LOCK(flags);
+	/* Set/clear mesh cell enable */
+	if (value)
+		MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
+	else
+		MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
+	(void)MACIO_IN32(HEATHROW_FCR);
+	udelay(10);
+	/* Set/Clear termination power */
+	if (value)
+		MACIO_BIC(HEATHROW_MBCR, 0x04000000);
+	else
+		MACIO_BIS(HEATHROW_MBCR, 0x04000000);
+	(void)MACIO_IN32(HEATHROW_MBCR);
+	udelay(10);
+	UNLOCK(flags);
+
+	return 0;
+}
+
+static long heathrow_ide_enable(struct device_node *node, long param,
+				long value)
+{
+	switch(param) {
+	case 0:
+		return simple_feature_tweak(node, macio_unknown,
+			HEATHROW_FCR, HRW_IDE0_ENABLE, value);
+	case 1:
+		return simple_feature_tweak(node, macio_unknown,
+			HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value);
+	default:
+		return -ENODEV;
+	}
+}
+
+static long heathrow_ide_reset(struct device_node *node, long param,
+			       long value)
+{
+	switch(param) {
+	case 0:
+		return simple_feature_tweak(node, macio_unknown,
+			HEATHROW_FCR, HRW_IDE0_RESET_N, !value);
+	case 1:
+		return simple_feature_tweak(node, macio_unknown,
+			HEATHROW_FCR, HRW_IDE1_RESET_N, !value);
+	default:
+		return -ENODEV;
+	}
+}
+
+static long heathrow_bmac_enable(struct device_node *node, long param,
+				 long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	if (value) {
+		LOCK(flags);
+		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
+		UNLOCK(flags);
+		(void)MACIO_IN32(HEATHROW_FCR);
+		mdelay(10);
+		LOCK(flags);
+		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
+		UNLOCK(flags);
+		(void)MACIO_IN32(HEATHROW_FCR);
+		mdelay(10);
+	} else {
+		LOCK(flags);
+		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
+		UNLOCK(flags);
+	}
+	return 0;
+}
+
+static long heathrow_sound_enable(struct device_node *node, long param,
+				  long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+
+	/* B&W G3 and Yikes don't support that properly (the
+	 * sound appear to never come back after beeing shut down).
+	 */
+	if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
+	    pmac_mb.model_id == PMAC_TYPE_YIKES)
+		return 0;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	if (value) {
+		LOCK(flags);
+		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+		UNLOCK(flags);
+		(void)MACIO_IN32(HEATHROW_FCR);
+	} else {
+		LOCK(flags);
+		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+		UNLOCK(flags);
+	}
+	return 0;
+}
+
+static u32 save_fcr[6];
+static u32 save_mbcr;
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
+static struct dbdma_regs save_dbdma[13];
+static struct dbdma_regs save_alt_dbdma[13];
+
+static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
+{
+	int i;
+
+	/* Save state & config of DBDMA channels */
+	for (i = 0; i < 13; i++) {
+		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+			(macio->base + ((0x8000+i*0x100)>>2));
+		save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
+		save[i].cmdptr = in_le32(&chan->cmdptr);
+		save[i].intr_sel = in_le32(&chan->intr_sel);
+		save[i].br_sel = in_le32(&chan->br_sel);
+		save[i].wait_sel = in_le32(&chan->wait_sel);
+	}
+}
+
+static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
+{
+	int i;
+
+	/* Restore state & config of DBDMA channels */
+	for (i = 0; i < 13; i++) {
+		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
+			(macio->base + ((0x8000+i*0x100)>>2));
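+		/* Writing all the mask bits (<<16) with the corresponding
+		 * value bits clear stops the channel before its state is
+		 * reloaded.
+		 */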
+		out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
+		while (in_le32(&chan->status) & ACTIVE)
+			mb();
+		out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
+		out_le32(&chan->cmdptr, save[i].cmdptr);
+		out_le32(&chan->intr_sel, save[i].intr_sel);
+		out_le32(&chan->br_sel, save[i].br_sel);
+		out_le32(&chan->wait_sel, save[i].wait_sel);
+	}
+}
+
+static void heathrow_sleep(struct macio_chip *macio, int secondary)
+{
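+	/* Note: raw register offsets are used below: 0x38 is HEATHROW_FCR
+	 * and 0x34 is HEATHROW_MBCR; 0x3c is presumably the auxiliary
+	 * control register next to the FCR.
+	 */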
+	if (secondary) {
+		dbdma_save(macio, save_alt_dbdma);
+		save_fcr[2] = MACIO_IN32(0x38);
+		save_fcr[3] = MACIO_IN32(0x3c);
+	} else {
+		dbdma_save(macio, save_dbdma);
+		save_fcr[0] = MACIO_IN32(0x38);
+		save_fcr[1] = MACIO_IN32(0x3c);
+		save_mbcr = MACIO_IN32(0x34);
+		/* Make sure sound is shut down */
+		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
+		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+		/* This seems to be necessary as well or the fan
+		 * keeps coming back up and the battery drains fast */
+		MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
+		MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
+		/* Make sure eth is down even if the driver module or
+		 * sleep support didn't shut it down properly */
+		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
+	}
+	/* Make sure modem is shut down */
+	MACIO_OUT8(HRW_GPIO_MODEM_RESET,
+		MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
+	MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
+	MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);
+
+	/* Let things settle */
+	(void)MACIO_IN32(HEATHROW_FCR);
+}
+
+static void heathrow_wakeup(struct macio_chip *macio, int secondary)
+{
+	if (secondary) {
+		MACIO_OUT32(0x38, save_fcr[2]);
+		(void)MACIO_IN32(0x38);
+		mdelay(1);
+		MACIO_OUT32(0x3c, save_fcr[3]);
+		(void)MACIO_IN32(0x38);
+		mdelay(10);
+		dbdma_restore(macio, save_alt_dbdma);
+	} else {
+		MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
+		(void)MACIO_IN32(0x38);
+		mdelay(1);
+		MACIO_OUT32(0x3c, save_fcr[1]);
+		(void)MACIO_IN32(0x38);
+		mdelay(1);
+		MACIO_OUT32(0x34, save_mbcr);
+		(void)MACIO_IN32(0x38);
+		mdelay(10);
+		dbdma_restore(macio, save_dbdma);
+	}
+}
+
+static long heathrow_sleep_state(struct device_node *node, long param,
+				 long value)
+{
+	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+		return -EPERM;
+	if (value == 1) {
+		if (macio_chips[1].type == macio_gatwick)
+			heathrow_sleep(&macio_chips[0], 1);
+		heathrow_sleep(&macio_chips[0], 0);
+	} else if (value == 0) {
+		heathrow_wakeup(&macio_chips[0], 0);
+		if (macio_chips[1].type == macio_gatwick)
+			heathrow_wakeup(&macio_chips[0], 1);
+	}
+	return 0;
+}
+
+static long core99_scc_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+	unsigned long		chan_mask;
+	u32			fcr;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	if (!strcmp(node->name, "ch-a"))
+		chan_mask = MACIO_FLAG_SCCA_ON;
+	else if (!strcmp(node->name, "ch-b"))
+		chan_mask = MACIO_FLAG_SCCB_ON;
+	else
+		return -ENODEV;
+
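+	/* The low 12 bits of 'param' select the link type wired to the
+	 * channel (e.g. PMAC_SCC_IRDA or PMAC_SCC_I2S1), and
+	 * PMAC_SCC_FLAG_XMON marks the channel as locked for the debugger
+	 * so it can't be disabled later.
+	 */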
+	if (value) {
+		int need_reset_scc = 0;
+		int need_reset_irda = 0;
+
+		LOCK(flags);
+		fcr = MACIO_IN32(KEYLARGO_FCR0);
+		/* Check if scc cell needs enabling */
+		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
+			fcr |= KL0_SCC_CELL_ENABLE;
+			need_reset_scc = 1;
+		}
+		if (chan_mask & MACIO_FLAG_SCCA_ON) {
+			fcr |= KL0_SCCA_ENABLE;
+			/* Don't enable line drivers for I2S modem */
+			if ((param & 0xfff) == PMAC_SCC_I2S1)
+				fcr &= ~KL0_SCC_A_INTF_ENABLE;
+			else
+				fcr |= KL0_SCC_A_INTF_ENABLE;
+		}
+		if (chan_mask & MACIO_FLAG_SCCB_ON) {
+			fcr |= KL0_SCCB_ENABLE;
+			/* Perform irda specific inits */
+			if ((param & 0xfff) == PMAC_SCC_IRDA) {
+				fcr &= ~KL0_SCC_B_INTF_ENABLE;
+				fcr |= KL0_IRDA_ENABLE;
+				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
+				fcr |= KL0_IRDA_SOURCE1_SEL;
+				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+				need_reset_irda = 1;
+			} else
+				fcr |= KL0_SCC_B_INTF_ENABLE;
+		}
+		MACIO_OUT32(KEYLARGO_FCR0, fcr);
+		macio->flags |= chan_mask;
+		if (need_reset_scc)  {
+			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			UNLOCK(flags);
+			mdelay(15);
+			LOCK(flags);
+			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
+		}
+		if (need_reset_irda)  {
+			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			UNLOCK(flags);
+			mdelay(15);
+			LOCK(flags);
+			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
+		}
+		UNLOCK(flags);
+		if (param & PMAC_SCC_FLAG_XMON)
+			macio->flags |= MACIO_FLAG_SCC_LOCKED;
+	} else {
+		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
+			return -EPERM;
+		LOCK(flags);
+		fcr = MACIO_IN32(KEYLARGO_FCR0);
+		if (chan_mask & MACIO_FLAG_SCCA_ON)
+			fcr &= ~KL0_SCCA_ENABLE;
+		if (chan_mask & MACIO_FLAG_SCCB_ON) {
+			fcr &= ~KL0_SCCB_ENABLE;
+			/* Perform irda specific clears */
+			if ((param & 0xfff) == PMAC_SCC_IRDA) {
+				fcr &= ~KL0_IRDA_ENABLE;
+				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
+				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
+				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
+			}
+		}
+		MACIO_OUT32(KEYLARGO_FCR0, fcr);
+		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
+			fcr &= ~KL0_SCC_CELL_ENABLE;
+			MACIO_OUT32(KEYLARGO_FCR0, fcr);
+		}
+		macio->flags &= ~(chan_mask);
+		UNLOCK(flags);
+		mdelay(10);
+	}
+	return 0;
+}
+
+static long
+core99_modem_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio;
+	u8			gpio;
+	unsigned long		flags;
+
+	/* Hack for internal USB modem */
+	if (node == NULL) {
+		if (macio_chips[0].type != macio_keylargo)
+			return -ENODEV;
+		node = macio_chips[0].of_node;
+	}
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+	if (!value) {
+		LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+		UNLOCK(flags);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		mdelay(250);
+	}
+	LOCK(flags);
+	if (value) {
+		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+		UNLOCK(flags);
+		(void)MACIO_IN32(KEYLARGO_FCR2);
+		mdelay(250);
+	} else {
+		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+		UNLOCK(flags);
+	}
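+	/* Pulse the modem reset GPIO (high, low, high again) with 250ms
+	 * of settle time after each transition to bring the modem out of
+	 * reset.
+	 */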
+	if (value) {
+		LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250);
+	}
+	return 0;
+}
+
+static long
+pangea_modem_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio;
+	u8			gpio;
+	unsigned long		flags;
+
+	/* Hack for internal USB modem */
+	if (node == NULL) {
+		if (macio_chips[0].type != macio_pangea &&
+		    macio_chips[0].type != macio_intrepid)
+			return -ENODEV;
+		node = macio_chips[0].of_node;
+	}
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
+	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
+	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;
+
+	if (!value) {
+		LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+		UNLOCK(flags);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		mdelay(250);
+	}
+	LOCK(flags);
+	if (value) {
+		MACIO_OUT8(KL_GPIO_MODEM_POWER,
+			KEYLARGO_GPIO_OUTPUT_ENABLE);
+		UNLOCK(flags);
+		(void)MACIO_IN32(KEYLARGO_FCR2);
+		mdelay(250);
+	} else {
+		MACIO_OUT8(KL_GPIO_MODEM_POWER,
+			KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+		UNLOCK(flags);
+	}
+	if (value) {
+		LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250); LOCK(flags);
+		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
+		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
+		UNLOCK(flags); mdelay(250);
+	}
+	return 0;
+}
+
+static long
+core99_ata100_enable(struct device_node *node, long value)
+{
+	unsigned long flags;
+	struct pci_dev *pdev = NULL;
+	u8 pbus, pid;
+
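+	/* The U2-based ATA/100 cell apparently only exists on UniNorth
+	 * revision 0x24 and later.
+	 */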
+	if (uninorth_rev < 0x24)
+		return -ENODEV;
+
+	LOCK(flags);
+	if (value)
+		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+	else
+		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
+	(void)UN_IN(UNI_N_CLOCK_CNTL);
+	UNLOCK(flags);
+	udelay(20);
+
+	if (value) {
+		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
+			pdev = pci_find_slot(pbus, pid);
+		if (pdev == NULL)
+			return 0;
+		pci_enable_device(pdev);
+		pci_set_master(pdev);
+	}
+	return 0;
+}
+
+static long
+core99_ide_enable(struct device_node *node, long param, long value)
+{
+	/* Bus IDs 0 to 2 are KeyLargo-based IDE; bus ID 3 is the
+	 * U2-based ATA-100
+	 */
+	switch(param) {
+	    case 0:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
+	    case 1:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
+	    case 2:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
+	    case 3:
+		return core99_ata100_enable(node, value);
+	    default:
+		return -ENODEV;
+	}
+}
+
+static long
+core99_ide_reset(struct device_node *node, long param, long value)
+{
+	switch(param) {
+	    case 0:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
+	    case 1:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
+	    case 2:
+		return simple_feature_tweak(node, macio_unknown,
+			KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
+	    default:
+		return -ENODEV;
+	}
+}
+
+static long
+core99_gmac_enable(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+
+	LOCK(flags);
+	if (value)
+		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+	else
+		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
+	(void)UN_IN(UNI_N_CLOCK_CNTL);
+	UNLOCK(flags);
+	udelay(20);
+
+	return 0;
+}
+
+static long
+core99_gmac_phy_reset(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+	struct macio_chip *macio;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+
+	LOCK(flags);
+	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
+	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
+	UNLOCK(flags);
+	mdelay(10);
+	LOCK(flags);
+	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
+		KEYLARGO_GPIO_OUTOUT_DATA);
+	UNLOCK(flags);
+	mdelay(10);
+
+	return 0;
+}
+
+static long
+core99_sound_chip_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+
+	/* TODO: Implement a better probe here; Screamer G4 desktops &
+	 * iMacs can do that too; add a recalibrate in
+	 * the driver as well
+	 */
+	if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
+	    pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
+		LOCK(flags);
+		if (value)
+			MACIO_OUT8(KL_GPIO_SOUND_POWER,
+				KEYLARGO_GPIO_OUTPUT_ENABLE |
+				KEYLARGO_GPIO_OUTOUT_DATA);
+		else
+			MACIO_OUT8(KL_GPIO_SOUND_POWER,
+				KEYLARGO_GPIO_OUTPUT_ENABLE);
+		(void)MACIO_IN8(KL_GPIO_SOUND_POWER);
+		UNLOCK(flags);
+	}
+	return 0;
+}
+
+static long
+core99_airport_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip*	macio;
+	unsigned long		flags;
+	int			state;
+
+	macio = macio_find(node, 0);
+	if (!macio)
+		return -ENODEV;
+
+	/* Hint: we allow passing of macio itself for the sake of the
+	 * sleep code
+	 */
+	if (node != macio->of_node &&
+	    (!node->parent || node->parent != macio->of_node))
+		return -ENODEV;
+	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
+	if (value == state)
+		return 0;
+	if (value) {
+		/* This code is a reproduction of OF enable-cardslot
+		 * and init-wireless methods, slightly hacked until
+		 * I got it working.
+		 */
+		LOCK(flags);
+		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
+		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+		UNLOCK(flags);
+		mdelay(10);
+		LOCK(flags);
+		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
+		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
+		UNLOCK(flags);
+
+		mdelay(10);
+
+		LOCK(flags);
+		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+		(void)MACIO_IN32(KEYLARGO_FCR2);
+		udelay(10);
+		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
+		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
+		udelay(10);
+		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
+		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
+		udelay(10);
+		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
+		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
+		udelay(10);
+		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
+		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
+		udelay(10);
+		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
+		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
+		UNLOCK(flags);
+		udelay(10);
+		MACIO_OUT32(0x1c000, 0);
+		mdelay(1);
+		MACIO_OUT8(0x1a3e0, 0x41);
+		(void)MACIO_IN8(0x1a3e0);
+		udelay(10);
+		LOCK(flags);
+		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
+		(void)MACIO_IN32(KEYLARGO_FCR2);
+		UNLOCK(flags);
+		mdelay(100);
+
+		macio->flags |= MACIO_FLAG_AIRPORT_ON;
+	} else {
+		LOCK(flags);
+		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
+		(void)MACIO_IN32(KEYLARGO_FCR2);
+		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
+		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
+		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
+		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
+		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
+		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
+		UNLOCK(flags);
+
+		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static long
+core99_reset_cpu(struct device_node *node, long param, long value)
+{
+	unsigned int reset_io = 0;
+	unsigned long flags;
+	struct macio_chip *macio;
+	struct device_node *np;
+	const int dflt_reset_lines[] = {	KL_GPIO_RESET_CPU0,
+						KL_GPIO_RESET_CPU1,
+						KL_GPIO_RESET_CPU2,
+						KL_GPIO_RESET_CPU3 };
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo)
+		return -ENODEV;
+
+	np = find_path_device("/cpus");
+	if (np == NULL)
+		return -ENODEV;
+	for (np = np->child; np != NULL; np = np->sibling) {
+		u32 *num = (u32 *)get_property(np, "reg", NULL);
+		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
+		if (num == NULL || rst == NULL)
+			continue;
+		if (param == *num) {
+			reset_io = *rst;
+			break;
+		}
+	}
+	if (np == NULL || reset_io == 0)
+		reset_io = dflt_reset_lines[param];
+
+	LOCK(flags);
+	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+	(void)MACIO_IN8(reset_io);
+	udelay(1);
+	MACIO_OUT8(reset_io, 0);
+	(void)MACIO_IN8(reset_io);
+	UNLOCK(flags);
+
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static long
+core99_usb_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio;
+	unsigned long flags;
+	char *prop;
+	int number;
+	u32 reg;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+
+	prop = (char *)get_property(node, "AAPL,clock-id", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (strncmp(prop, "usb0u048", 8) == 0)
+		number = 0;
+	else if (strncmp(prop, "usb1u148", 8) == 0)
+		number = 2;
+	else if (strncmp(prop, "usb2u248", 8) == 0)
+		number = 4;
+	else
+		return -ENODEV;
+
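+	/* 'number' selects the USB cell: 0 and 2 are the two cells
+	 * controlled through FCR0 (port wake bits in FCR4), 4 is the
+	 * USB2 cell controlled through FCR1 (port wake bits in FCR3);
+	 * each cell drives a pair of ports, hence the number/number+1
+	 * bit pairs below.
+	 */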
+	/* Sorry for the brute-force locking, but this is only used during
+	 * sleep and the timing seems to be critical
+	 */
+	LOCK(flags);
+	if (value) {
+		/* Turn ON */
+		if (number == 0) {
+			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			UNLOCK(flags);
+			mdelay(1);
+			LOCK(flags);
+			MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+		} else if (number == 2) {
+			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+			UNLOCK(flags);
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			mdelay(1);
+			LOCK(flags);
+			MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+		} else if (number == 4) {
+			MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+			UNLOCK(flags);
+			(void)MACIO_IN32(KEYLARGO_FCR1);
+			mdelay(1);
+			LOCK(flags);
+			MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
+		}
+		if (number < 4) {
+			reg = MACIO_IN32(KEYLARGO_FCR4);
+			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+				KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
+			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+				KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
+			MACIO_OUT32(KEYLARGO_FCR4, reg);
+			(void)MACIO_IN32(KEYLARGO_FCR4);
+			udelay(10);
+		} else {
+			reg = MACIO_IN32(KEYLARGO_FCR3);
+			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+				KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
+			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+				KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
+			MACIO_OUT32(KEYLARGO_FCR3, reg);
+			(void)MACIO_IN32(KEYLARGO_FCR3);
+			udelay(10);
+		}
+		if (macio->type == macio_intrepid) {
+			/* wait for clock stopped bits to clear */
+			u32 test0 = 0, test1 = 0;
+			u32 status0, status1;
+			int timeout = 1000;
+
+			UNLOCK(flags);
+			switch (number) {
+			case 0:
+				test0 = UNI_N_CLOCK_STOPPED_USB0;
+				test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
+				break;
+			case 2:
+				test0 = UNI_N_CLOCK_STOPPED_USB1;
+				test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
+				break;
+			case 4:
+				test0 = UNI_N_CLOCK_STOPPED_USB2;
+				test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
+				break;
+			}
+			do {
+				if (--timeout <= 0) {
+					printk(KERN_ERR "core99_usb_enable: "
+					       "Timeout waiting for clocks\n");
+					break;
+				}
+				mdelay(1);
+				status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
+				status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
+			} while ((status0 & test0) | (status1 & test1));
+			LOCK(flags);
+		}
+	} else {
+		/* Turn OFF */
+		if (number < 4) {
+			reg = MACIO_IN32(KEYLARGO_FCR4);
+			reg |=	KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
+				KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
+			reg |=	KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
+				KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
+			MACIO_OUT32(KEYLARGO_FCR4, reg);
+			(void)MACIO_IN32(KEYLARGO_FCR4);
+			udelay(1);
+		} else {
+			reg = MACIO_IN32(KEYLARGO_FCR3);
+			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
+				KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
+			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
+				KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
+			MACIO_OUT32(KEYLARGO_FCR3, reg);
+			(void)MACIO_IN32(KEYLARGO_FCR3);
+			udelay(1);
+		}
+		if (number == 0) {
+			if (macio->type != macio_intrepid)
+				MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			udelay(1);
+			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+		} else if (number == 2) {
+			if (macio->type != macio_intrepid)
+				MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+			udelay(1);
+			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
+			(void)MACIO_IN32(KEYLARGO_FCR0);
+		} else if (number == 4) {
+			udelay(1);
+			MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
+			(void)MACIO_IN32(KEYLARGO_FCR1);
+		}
+		udelay(1);
+	}
+	UNLOCK(flags);
+
+	return 0;
+}
+
+static long
+core99_firewire_enable(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+	struct macio_chip *macio;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+		return -ENODEV;
+
+	LOCK(flags);
+	if (value) {
+		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+		(void)UN_IN(UNI_N_CLOCK_CNTL);
+	} else {
+		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
+		(void)UN_IN(UNI_N_CLOCK_CNTL);
+	}
+	UNLOCK(flags);
+	mdelay(1);
+
+	return 0;
+}
+
+static long
+core99_firewire_cable_power(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+	struct macio_chip *macio;
+
+	/* Trick: we allow NULL node */
+	if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
+		return -ENODEV;
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
+		return -ENODEV;
+
+	LOCK(flags);
+	if (value) {
+		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
+		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
+		udelay(10);
+	} else {
+		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
+		MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10);
+	}
+	UNLOCK(flags);
+	mdelay(1);
+
+	return 0;
+}
+
+static long
+intrepid_aack_delay_enable(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+
+	if (uninorth_rev < 0xd2)
+		return -ENODEV;
+
+	LOCK(flags);
+	if (param)
+		UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+	else
+		UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
+	UNLOCK(flags);
+
+	return 0;
+}
+
+
+#endif /* CONFIG_POWER4 */
+
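+/* For the read/write GPIO feature calls, 'param' is the raw GPIO
+ * register offset within the mac-io space.
+ */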
+static long
+core99_read_gpio(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio = &macio_chips[0];
+
+	return MACIO_IN8(param);
+}
+
+
+static long
+core99_write_gpio(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio = &macio_chips[0];
+
+	MACIO_OUT8(param, (u8)(value & 0xff));
+	return 0;
+}
+
+#ifdef CONFIG_POWER4
+static long g5_gmac_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio = &macio_chips[0];
+	unsigned long flags;
+
+	if (node == NULL)
+		return -ENODEV;
+
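+	/* When the GMAC clock is stopped, the node is put on k2_skiplist
+	 * so that PCI config space accesses skip the device (touching a
+	 * clock-gated cell would presumably hang the machine).
+	 */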
+	LOCK(flags);
+	if (value) {
+		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+		mb();
+		k2_skiplist[0] = NULL;
+	} else {
+		k2_skiplist[0] = node;
+		mb();
+		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
+	}
+	
+	UNLOCK(flags);
+	mdelay(1);
+
+	return 0;
+}
+
+static long g5_fw_enable(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio = &macio_chips[0];
+	unsigned long flags;
+
+	if (node == NULL)
+		return -ENODEV;
+
+	LOCK(flags);
+	if (value) {
+		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+		mb();
+		k2_skiplist[1] = NULL;
+	} else {
+		k2_skiplist[1] = node;
+		mb();
+		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
+	}
+	
+	UNLOCK(flags);
+	mdelay(1);
+
+	return 0;
+}
+
+static long g5_mpic_enable(struct device_node *node, long param, long value)
+{
+	unsigned long flags;
+
+	if (node->parent == NULL || strcmp(node->parent->name, "u3"))
+		return 0;
+
+	LOCK(flags);
+	UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
+	UNLOCK(flags);
+
+	return 0;
+}
+
+static long g5_eth_phy_reset(struct device_node *node, long param, long value)
+{
+	struct macio_chip *macio = &macio_chips[0];
+	struct device_node *phy;
+	int need_reset;
+
+	/*
+	 * We must not reset the combo PHYs, only the BCM5221 found in
+	 * the iMac G5.
+	 */
+	phy = of_get_next_child(node, NULL);
+	if (!phy)
+		return -ENODEV;
+	need_reset = device_is_compatible(phy, "B5221");
+	of_node_put(phy);
+	if (!need_reset)
+		return 0;
+
+	/* PHY reset is GPIO 29, not in device-tree unfortunately */
+	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
+		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
+	/* Thankfully, sungem now always calls this at a time when we
+	 * can schedule.
+	 */
+	msleep(10);
+	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
+
+	return 0;
+}
+
+static long g5_i2s_enable(struct device_node *node, long param, long value)
+{
+	/* Very crude implementation for now */
+	struct macio_chip *macio = &macio_chips[0];
+	unsigned long flags;
+
+	if (value == 0)
+		return 0; /* don't disable yet */
+
+	LOCK(flags);
+	MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
+		  KL3_I2S0_CLK18_ENABLE);
+	udelay(10);
+	MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
+		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
+	udelay(10);
+	MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
+	UNLOCK(flags);
+	udelay(10);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_SMP
+static long g5_reset_cpu(struct device_node *node, long param, long value)
+{
+	unsigned int reset_io = 0;
+	unsigned long flags;
+	struct macio_chip *macio;
+	struct device_node *np;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo2)
+		return -ENODEV;
+
+	np = find_path_device("/cpus");
+	if (np == NULL)
+		return -ENODEV;
+	for (np = np->child; np != NULL; np = np->sibling) {
+		u32 *num = (u32 *)get_property(np, "reg", NULL);
+		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
+		if (num == NULL || rst == NULL)
+			continue;
+		if (param == *num) {
+			reset_io = *rst;
+			break;
+		}
+	}
+	if (np == NULL || reset_io == 0)
+		return -ENODEV;
+
+	LOCK(flags);
+	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
+	(void)MACIO_IN8(reset_io);
+	udelay(1);
+	MACIO_OUT8(reset_io, 0);
+	(void)MACIO_IN8(reset_io);
+	UNLOCK(flags);
+
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * This can be called from pmac_smp so it isn't static.
+ *
+ * This takes the second CPU off the bus on dual-CPU machines
+ * running UP.
+ */
+void g5_phy_disable_cpu1(void)
+{
+	UN_OUT(U3_API_PHY_CONFIG_1, 0);
+}
+#endif /* CONFIG_POWER4 */
+
+#ifndef CONFIG_POWER4
+
+static void
+keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+	u32 temp;
+
+	if (sleep_mode) {
+		mdelay(1);
+		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
+		(void)MACIO_IN32(KEYLARGO_FCR0);
+		mdelay(1);
+	}
+
+	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+				KL0_SCC_CELL_ENABLE |
+				KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
+				KL0_IRDA_CLK19_ENABLE);
+
+	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
+	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);
+
+	MACIO_BIC(KEYLARGO_FCR1,
+		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+		KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
+		KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
+		KL1_UIDE_ENABLE);
+
+	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);
+
+	temp = MACIO_IN32(KEYLARGO_FCR3);
+	if (macio->rev >= 2) {
+		temp |= KL3_SHUTDOWN_PLL2X;
+		if (sleep_mode)
+			temp |= KL3_SHUTDOWN_PLL_TOTAL;
+	}
+
+	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+		KL3_SHUTDOWN_PLLKW35;
+	if (sleep_mode)
+		temp |= KL3_SHUTDOWN_PLLKW12;
+	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
+		| KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+	if (sleep_mode)
+		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
+	MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+	/* Flush posted writes & wait a bit */
+	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+static void
+pangea_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+	u32 temp;
+
+	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+				KL0_SCC_CELL_ENABLE |
+				KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);
+
+	MACIO_BIC(KEYLARGO_FCR1,
+		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
+		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
+		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
+		KL1_UIDE_ENABLE);
+	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
+
+	temp = MACIO_IN32(KEYLARGO_FCR3);
+	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
+		KL3_SHUTDOWN_PLLKW35;
+	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
+		| KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
+	if (sleep_mode)
+		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
+	MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+	/* Flush posted writes & wait a bit */
+	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
+}
+
+static void
+intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
+{
+	u32 temp;
+
+	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
+		  KL0_SCC_CELL_ENABLE);
+
+	MACIO_BIC(KEYLARGO_FCR1,
+		  /*KL1_USB2_CELL_ENABLE |*/
+		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
+		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
+		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
+	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
+		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
+
+	temp = MACIO_IN32(KEYLARGO_FCR3);
+	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
+		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
+	if (sleep_mode)
+		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
+	MACIO_OUT32(KEYLARGO_FCR3, temp);
+
+	/* Flush posted writes & wait a bit */
+	(void)MACIO_IN32(KEYLARGO_FCR0);
+	mdelay(10);
+}
+
+
+void pmac_tweak_clock_spreading(int enable)
+{
+	struct macio_chip *macio = &macio_chips[0];
+
+	/* Hack for doing clock spreading on some PowerBooks and
+	 * iBooks. This implements the "platform-do-clockspreading" OF
+	 * property as decoded manually on various models. For safety, we also
+	 * check the product ID in the device-tree in the cases where we whack
+	 * the i2c chip, to make reasonably sure we won't set wrong values in
+	 * there.
+	 *
+	 * Of course, ultimately, we have to implement a real parser for
+	 * the platform-do-* stuff...
+	 */
+
+	if (macio->type == macio_intrepid) {
+		if (enable)
+			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+		else
+			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+		mdelay(40);
+	}
+
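+	/* Note: this while() never loops; it is just used as a breakable
+	 * block so the error paths below can bail out with 'break'.
+	 */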
+	while (machine_is_compatible("PowerBook5,2") ||
+	       machine_is_compatible("PowerBook5,3") ||
+	       machine_is_compatible("PowerBook6,2") ||
+	       machine_is_compatible("PowerBook6,3")) {
+		struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
+		struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
+		u8 buffer[9];
+		u32 *productID;
+		int i, rc, changed = 0;
+
+		if (dt == NULL)
+			break;
+		productID = (u32 *)get_property(dt, "pid#", NULL);
+		if (productID == NULL)
+			break;
+		while(ui2c) {
+			struct device_node *p = of_get_parent(ui2c);
+			if (p && !strcmp(p->name, "uni-n"))
+				break;
+			ui2c = of_find_node_by_type(ui2c, "i2c");
+		}
+		if (ui2c == NULL)
+			break;
+		DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
+		rc = pmac_low_i2c_open(ui2c, 1);
+		if (rc != 0)
+			break;
+		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
+		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
+		DBG("read result: %d,", rc);
+		if (rc != 0) {
+			pmac_low_i2c_close(ui2c);
+			break;
+		}
+		for (i=0; i<9; i++)
+			DBG(" %02x", buffer[i]);
+		DBG("\n");
+
+		switch(*productID) {
+		case 0x1182:	/* AlBook 12" rev 2 */
+		case 0x1183:	/* iBook G4 12" */
+			buffer[0] = (buffer[0] & 0x8f) | 0x70;
+			buffer[2] = (buffer[2] & 0x7f) | 0x00;
+			buffer[5] = (buffer[5] & 0x80) | 0x31;
+			buffer[6] = (buffer[6] & 0x40) | 0xb0;
+			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
+			buffer[8] = (buffer[8] & 0x00) | 0x30;
+			changed = 1;
+			break;
+		case 0x3142:	/* AlBook 15" (ATI M10) */
+		case 0x3143:	/* AlBook 17" (ATI M10) */
+			buffer[0] = (buffer[0] & 0xaf) | 0x50;
+			buffer[2] = (buffer[2] & 0x7f) | 0x00;
+			buffer[5] = (buffer[5] & 0x80) | 0x31;
+			buffer[6] = (buffer[6] & 0x40) | 0xb0;
+			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
+			buffer[8] = (buffer[8] & 0x00) | 0x30;
+			changed = 1;
+			break;
+		default:
+			DBG("i2c-hwclock: Machine model not handled\n");
+			break;
+		}
+		if (!changed) {
+			pmac_low_i2c_close(ui2c);
+			break;
+		}
+		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
+		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
+		DBG("write result: %d,", rc);
+		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
+		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
+		DBG("read result: %d,", rc);
+		if (rc != 0) {
+			pmac_low_i2c_close(ui2c);
+			break;
+		}
+		for (i=0; i<9; i++)
+			DBG(" %02x", buffer[i]);
+		pmac_low_i2c_close(ui2c);
+		break;
+	}
+}
+
+
+static int
+core99_sleep(void)
+{
+	struct macio_chip *macio;
+	int i;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+
+	/* We power off the wireless slot in case it was not done
+	 * by the driver. We don't power it on automatically, however.
+	 */
+	if (macio->flags & MACIO_FLAG_AIRPORT_ON)
+		core99_airport_enable(macio->of_node, 0, 0);
+
+	/* We power off the FW cable. Should be done by the driver... */
+	if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
+		core99_firewire_enable(NULL, 0, 0);
+		core99_firewire_cable_power(NULL, 0, 0);
+	}
+
+	/* We make sure int. modem is off (in case driver lost it) */
+	if (macio->type == macio_keylargo)
+		core99_modem_enable(macio->of_node, 0, 0);
+	else
+		pangea_modem_enable(macio->of_node, 0, 0);
+
+	/* We make sure the sound is off as well */
+	core99_sound_chip_enable(macio->of_node, 0, 0);
+
+	/*
+	 * Save various bits of KeyLargo
+	 */
+
+	/* Save the state of the various GPIOs */
+	save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
+	save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
+	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+		save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
+	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+		save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);
+
+	/* Save the FCRs */
+	if (macio->type == macio_keylargo)
+		save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
+	save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
+	save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
+	save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
+	save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
+	save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
+	if (macio->type == macio_pangea || macio->type == macio_intrepid)
+		save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);
+
+	/* Save state & config of DBDMA channels */
+	dbdma_save(macio, save_dbdma);
+
+	/*
+	 * Turn off as much as we can
+	 */
+	if (macio->type == macio_pangea)
+		pangea_shutdown(macio, 1);
+	else if (macio->type == macio_intrepid)
+		intrepid_shutdown(macio, 1);
+	else if (macio->type == macio_keylargo)
+		keylargo_shutdown(macio, 1);
+
+	/*
+	 * Put the host bridge to sleep
+	 */
+
+	save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
+	/* Note: do not switch GMAC off; the driver does it when necessary,
+	 * and WOL must keep it enabled!
+	 */
+	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
+	       ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
+	udelay(100);
+	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
+	mdelay(10);
+
+	/*
+	 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
+	 */
+	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+		MACIO_BIS(0x506e0, 0x00400000);
+		MACIO_BIS(0x506e0, 0x80000000);
+	}
+	return 0;
+}
+
+static int
+core99_wake_up(void)
+{
+	struct macio_chip *macio;
+	int i;
+
+	macio = &macio_chips[0];
+	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
+	    macio->type != macio_intrepid)
+		return -ENODEV;
+
+	/*
+	 * Wakeup the host bridge
+	 */
+	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+	udelay(10);
+	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+	udelay(10);
+
+	/*
+	 * Restore KeyLargo
+	 */
+
+	if (macio->type == macio_keylargo) {
+		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
+		(void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
+	}
+	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
+	(void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
+	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
+	(void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
+	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
+	(void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
+	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
+	(void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
+	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
+	(void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
+	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
+		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
+		(void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
+	}
+
+	dbdma_restore(macio, save_dbdma);
+
+	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
+	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
+	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
+		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
+	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
+		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);
+
+	/* FIXME more black magic with OpenPIC ... */
+	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
+		MACIO_BIC(0x506e0, 0x00400000);
+		MACIO_BIC(0x506e0, 0x80000000);
+	}
+
+	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
+	udelay(100);
+
+	return 0;
+}
+
+static long
+core99_sleep_state(struct device_node *node, long param, long value)
+{
+	/* Param == 1 means to enter the "fake sleep" mode that is
+	 * used for CPU speed switching
+	 */
+	if (param == 1) {
+		if (value == 1) {
+			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
+			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
+		} else {
+			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
+			udelay(10);
+			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
+			udelay(10);
+		}
+		return 0;
+	}
+	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
+		return -EPERM;
+
+	if (value == 1)
+		return core99_sleep();
+	else if (value == 0)
+		return core99_wake_up();
+	return 0;
+}
+
+#endif /* CONFIG_POWER4 */
+
+static long
+generic_dev_can_wake(struct device_node *node, long param, long value)
+{
+	/* Todo: eventually check that we are really dealing with the
+	 * on-board video device ...
+	 */
+
+	if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
+		pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
+	return 0;
+}
+
+static long generic_get_mb_info(struct device_node *node, long param, long value)
+{
+	switch(param) {
+		case PMAC_MB_INFO_MODEL:
+			return pmac_mb.model_id;
+		case PMAC_MB_INFO_FLAGS:
+			return pmac_mb.board_flags;
+		case PMAC_MB_INFO_NAME:
+			/* hack hack hack... but should work */
+			*((const char **)value) = pmac_mb.model_name;
+			return 0;
+	}
+	return -EINVAL;
+}
+
+
+/*
+ * Table definitions
+ */
+
+/* Used on any machine
+ */
+static struct feature_table_entry any_features[] = {
+	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
+	{ PMAC_FTR_DEVICE_CAN_WAKE,	generic_dev_can_wake },
+	{ 0, NULL }
+};
+
+#ifndef CONFIG_POWER4
+
+/* OHare based motherboards. Currently, we only use these on the
+ * 2400, 3400 and 3500 series PowerBooks. Some older desktops seem
+ * to have issues with turning those ASIC cells on/off.
+ */
+static struct feature_table_entry ohare_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
+	{ PMAC_FTR_SWIM3_ENABLE,	ohare_floppy_enable },
+	{ PMAC_FTR_MESH_ENABLE,		ohare_mesh_enable },
+	{ PMAC_FTR_IDE_ENABLE,		ohare_ide_enable},
+	{ PMAC_FTR_IDE_RESET,		ohare_ide_reset},
+	{ PMAC_FTR_SLEEP_STATE,		ohare_sleep_state },
+	{ 0, NULL }
+};
+
+/* Heathrow desktop machines (Beige G3).
+ * Separated as some features couldn't be properly tested
+ * and the serial port control bits appear to confuse it.
+ */
+static struct feature_table_entry heathrow_desktop_features[] = {
+	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
+	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
+	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
+	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
+	{ 0, NULL }
+};
+
+/* Heathrow based laptops, that is the Wallstreet and Mainstreet
+ * PowerBooks.
+ */
+static struct feature_table_entry heathrow_laptop_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
+	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
+	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
+	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
+	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
+	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
+	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
+	{ 0, NULL }
+};
+
+/* Paddington based machines:
+ * the Lombard (101) PowerBook, first iMac models, B&W G3 and Yikes G4.
+ */
+static struct feature_table_entry paddington_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
+	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
+	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
+	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
+	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
+	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
+	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
+	{ 0, NULL }
+};
+
+/* Core99 & MacRISC 2 machines (all machines released since the
+ * iBook included), that is all AGP machines except those with the
+ * Pangea chipset. The Pangea chipset is the "combo" UniNorth/KeyLargo
+ * used on the iBook 2 & the iMac "Flower Power".
+ */
+static struct feature_table_entry core99_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
+	{ PMAC_FTR_MODEM_ENABLE,	core99_modem_enable },
+	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
+	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
+	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
+	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
+	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
+	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
+	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
+	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
+#ifdef CONFIG_SMP
+	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
+#endif /* CONFIG_SMP */
+	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
+	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
+	{ 0, NULL }
+};
+
+/* RackMac
+ */
+static struct feature_table_entry rackmac_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
+	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
+	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
+	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
+	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
+	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
+	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
+	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
+#ifdef CONFIG_SMP
+	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
+#endif /* CONFIG_SMP */
+	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
+	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
+	{ 0, NULL }
+};
+
+/* Pangea features
+ */
+static struct feature_table_entry pangea_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
+	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
+	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
+	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
+	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
+	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
+	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
+	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
+	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
+	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
+	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
+	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
+	{ 0, NULL }
+};
+
+/* Intrepid features
+ */
+static struct feature_table_entry intrepid_features[] = {
+	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
+	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
+	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
+	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
+	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
+	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
+	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
+	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
+	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
+	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
+	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
+	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
+	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
+	{ PMAC_FTR_AACK_DELAY_ENABLE,	intrepid_aack_delay_enable },
+	{ 0, NULL }
+};
+
+#else /* CONFIG_POWER4 */
+
+/* G5 features
+ */
+static struct feature_table_entry g5_features[] = {
+	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
+	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
+	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
+	{ PMAC_FTR_GMAC_PHY_RESET,	g5_eth_phy_reset },
+	{ PMAC_FTR_SOUND_CHIP_ENABLE,	g5_i2s_enable },
+#ifdef CONFIG_SMP
+	{ PMAC_FTR_RESET_CPU,		g5_reset_cpu },
+#endif /* CONFIG_SMP */
+	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
+	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
+	{ 0, NULL }
+};
+
+#endif /* CONFIG_POWER4 */
+
+static struct pmac_mb_def pmac_mb_defs[] = {
+#ifndef CONFIG_POWER4
+	/*
+	 * Desktops
+	 */
+
+	{	"AAPL,8500",			"PowerMac 8500/8600",
+		PMAC_TYPE_PSURGE,		NULL,
+		0
+	},
+	{	"AAPL,9500",			"PowerMac 9500/9600",
+		PMAC_TYPE_PSURGE,		NULL,
+		0
+	},
+	{	"AAPL,7200",			"PowerMac 7200",
+		PMAC_TYPE_PSURGE,		NULL,
+		0
+	},
+	{	"AAPL,7300",			"PowerMac 7200/7300",
+		PMAC_TYPE_PSURGE,		NULL,
+		0
+	},
+	{	"AAPL,7500",			"PowerMac 7500",
+		PMAC_TYPE_PSURGE,		NULL,
+		0
+	},
+	{	"AAPL,ShinerESB",		"Apple Network Server",
+		PMAC_TYPE_ANS,			NULL,
+		0
+	},
+	{	"AAPL,e407",			"Alchemy",
+		PMAC_TYPE_ALCHEMY,		NULL,
+		0
+	},
+	{	"AAPL,e411",			"Gazelle",
+		PMAC_TYPE_GAZELLE,		NULL,
+		0
+	},
+	{	"AAPL,Gossamer",		"PowerMac G3 (Gossamer)",
+		PMAC_TYPE_GOSSAMER,		heathrow_desktop_features,
+		0
+	},
+	{	"AAPL,PowerMac G3",		"PowerMac G3 (Silk)",
+		PMAC_TYPE_SILK,			heathrow_desktop_features,
+		0
+	},
+	{	"PowerMac1,1",			"Blue&White G3",
+		PMAC_TYPE_YOSEMITE,		paddington_features,
+		0
+	},
+	{	"PowerMac1,2",			"PowerMac G4 PCI Graphics",
+		PMAC_TYPE_YIKES,		paddington_features,
+		0
+	},
+	{	"PowerMac2,1",			"iMac FireWire",
+		PMAC_TYPE_FW_IMAC,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac2,2",			"iMac FireWire",
+		PMAC_TYPE_FW_IMAC,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac3,1",			"PowerMac G4 AGP Graphics",
+		PMAC_TYPE_SAWTOOTH,		core99_features,
+		PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac3,2",			"PowerMac G4 AGP Graphics",
+		PMAC_TYPE_SAWTOOTH,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac3,3",			"PowerMac G4 AGP Graphics",
+		PMAC_TYPE_SAWTOOTH,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac3,4",			"PowerMac G4 Silver",
+		PMAC_TYPE_QUICKSILVER,		core99_features,
+		PMAC_MB_MAY_SLEEP
+	},
+	{	"PowerMac3,5",			"PowerMac G4 Silver",
+		PMAC_TYPE_QUICKSILVER,		core99_features,
+		PMAC_MB_MAY_SLEEP
+	},
+	{	"PowerMac3,6",			"PowerMac G4 Windtunnel",
+		PMAC_TYPE_WINDTUNNEL,		core99_features,
+		PMAC_MB_MAY_SLEEP,
+	},
+	{	"PowerMac4,1",			"iMac \"Flower Power\"",
+		PMAC_TYPE_PANGEA_IMAC,		pangea_features,
+		PMAC_MB_MAY_SLEEP
+	},
+	{	"PowerMac4,2",			"Flat panel iMac",
+		PMAC_TYPE_FLAT_PANEL_IMAC,	pangea_features,
+		PMAC_MB_CAN_SLEEP
+	},
+	{	"PowerMac4,4",			"eMac",
+		PMAC_TYPE_EMAC,			core99_features,
+		PMAC_MB_MAY_SLEEP
+	},
+	{	"PowerMac5,1",			"PowerMac G4 Cube",
+		PMAC_TYPE_CUBE,			core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
+	},
+	{	"PowerMac6,1",			"Flat panel iMac",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP,
+	},
+	{	"PowerMac6,3",			"Flat panel iMac",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP,
+	},
+	{	"PowerMac6,4",			"eMac",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP,
+	},
+	{	"PowerMac10,1",			"Mac mini",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
+	},
+	{	"iMac,1",			"iMac (first generation)",
+		PMAC_TYPE_ORIG_IMAC,		paddington_features,
+		0
+	},
+
+	/*
+	 * Xserves
+	 */
+
+	{	"RackMac1,1",			"XServe",
+		PMAC_TYPE_RACKMAC,		rackmac_features,
+		0,
+	},
+	{	"RackMac1,2",			"XServe rev. 2",
+		PMAC_TYPE_RACKMAC,		rackmac_features,
+		0,
+	},
+
+	/*
+	 * Laptops
+	 */
+
+	{	"AAPL,3400/2400",		"PowerBook 3400",
+		PMAC_TYPE_HOOPER,		ohare_features,
+		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+	},
+	{	"AAPL,3500",			"PowerBook 3500",
+		PMAC_TYPE_KANGA,		ohare_features,
+		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+	},
+	{	"AAPL,PowerBook1998",		"PowerBook Wallstreet",
+		PMAC_TYPE_WALLSTREET,		heathrow_laptop_features,
+		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+	},
+	{	"PowerBook1,1",			"PowerBook 101 (Lombard)",
+		PMAC_TYPE_101_PBOOK,		paddington_features,
+		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
+	},
+	{	"PowerBook2,1",			"iBook (first generation)",
+		PMAC_TYPE_ORIG_IBOOK,		core99_features,
+		PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+	},
+	{	"PowerBook2,2",			"iBook FireWire",
+		PMAC_TYPE_FW_IBOOK,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+		PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+	},
+	{	"PowerBook3,1",			"PowerBook Pismo",
+		PMAC_TYPE_PISMO,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
+		PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
+	},
+	{	"PowerBook3,2",			"PowerBook Titanium",
+		PMAC_TYPE_TITANIUM,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook3,3",			"PowerBook Titanium II",
+		PMAC_TYPE_TITANIUM2,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook3,4",			"PowerBook Titanium III",
+		PMAC_TYPE_TITANIUM3,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook3,5",			"PowerBook Titanium IV",
+		PMAC_TYPE_TITANIUM4,		core99_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook4,1",			"iBook 2",
+		PMAC_TYPE_IBOOK2,		pangea_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook4,2",			"iBook 2",
+		PMAC_TYPE_IBOOK2,		pangea_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook4,3",			"iBook 2 rev. 2",
+		PMAC_TYPE_IBOOK2,		pangea_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
+	},
+	{	"PowerBook5,1",			"PowerBook G4 17\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,2",			"PowerBook G4 15\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,3",			"PowerBook G4 17\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,4",			"PowerBook G4 15\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,5",			"PowerBook G4 17\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,6",			"PowerBook G4 15\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook5,7",			"PowerBook G4 17\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,1",			"PowerBook G4 12\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,2",			"PowerBook G4",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,3",			"iBook G4",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,4",			"PowerBook G4 12\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,5",			"iBook G4",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+	{	"PowerBook6,8",			"PowerBook G4 12\"",
+		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
+		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
+	},
+#else /* CONFIG_POWER4 */
+	{	"PowerMac7,2",			"PowerMac G5",
+		PMAC_TYPE_POWERMAC_G5,		g5_features,
+		0,
+	},
+#ifdef CONFIG_PPC64
+	{	"PowerMac7,3",			"PowerMac G5",
+		PMAC_TYPE_POWERMAC_G5,		g5_features,
+		0,
+	},
+	{	"PowerMac8,1",			"iMac G5",
+		PMAC_TYPE_IMAC_G5,		g5_features,
+		0,
+	},
+	{	"PowerMac9,1",			"PowerMac G5",
+		PMAC_TYPE_POWERMAC_G5_U3L,	g5_features,
+		0,
+	},
+	{       "RackMac3,1",                   "XServe G5",
+		PMAC_TYPE_XSERVE_G5,		g5_features,
+		0,
+	},
+#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_POWER4 */
+};
+
+/*
+ * The toplevel feature_call callback
+ */
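+/*
+ * Callers pass (struct device_node *node, long param, long value) as
+ * variadic arguments. The board-specific feature table is searched
+ * first, then the generic any_features table.
+ */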
+long pmac_do_feature_call(unsigned int selector, ...)
+{
+	struct device_node *node;
+	long param, value;
+	int i;
+	feature_call func = NULL;
+	va_list args;
+
+	if (pmac_mb.features)
+		for (i=0; pmac_mb.features[i].function; i++)
+			if (pmac_mb.features[i].selector == selector) {
+				func = pmac_mb.features[i].function;
+				break;
+			}
+	if (!func)
+		for (i=0; any_features[i].function; i++)
+			if (any_features[i].selector == selector) {
+				func = any_features[i].function;
+				break;
+			}
+	if (!func)
+		return -ENODEV;
+
+	va_start(args, selector);
+	node = (struct device_node*)va_arg(args, void*);
+	param = va_arg(args, long);
+	value = va_arg(args, long);
+	va_end(args);
+
+	return func(node, param, value);
+}
+
+static int __init probe_motherboard(void)
+{
+	int i;
+	struct macio_chip *macio = &macio_chips[0];
+	const char *model = NULL;
+	struct device_node *dt;
+
+	/* Lookup known motherboard type in device-tree. First try an
+	 * exact match on the "model" property, then try a "compatible"
+	 * match if none is found.
+	 */
+	dt = find_devices("device-tree");
+	if (dt != NULL)
+		model = (const char *) get_property(dt, "model", NULL);
+	for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
+	    if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
+		pmac_mb = pmac_mb_defs[i];
+		goto found;
+	    }
+	}
+	for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
+	    if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
+		pmac_mb = pmac_mb_defs[i];
+		goto found;
+	    }
+	}
+
+	/* Fallback to selection depending on mac-io chip type */
+	switch(macio->type) {
+#ifndef CONFIG_POWER4
+	case macio_grand_central:
+		pmac_mb.model_id = PMAC_TYPE_PSURGE;
+		pmac_mb.model_name = "Unknown PowerSurge";
+		break;
+	case macio_ohare:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
+		pmac_mb.model_name = "Unknown OHare-based";
+		break;
+	case macio_heathrow:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
+		pmac_mb.model_name = "Unknown Heathrow-based";
+		pmac_mb.features = heathrow_desktop_features;
+		break;
+	case macio_paddington:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
+		pmac_mb.model_name = "Unknown Paddington-based";
+		pmac_mb.features = paddington_features;
+		break;
+	case macio_keylargo:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
+		pmac_mb.model_name = "Unknown Keylargo-based";
+		pmac_mb.features = core99_features;
+		break;
+	case macio_pangea:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
+		pmac_mb.model_name = "Unknown Pangea-based";
+		pmac_mb.features = pangea_features;
+		break;
+	case macio_intrepid:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
+		pmac_mb.model_name = "Unknown Intrepid-based";
+		pmac_mb.features = intrepid_features;
+		break;
+#else /* CONFIG_POWER4 */
+	case macio_keylargo2:
+		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
+		pmac_mb.model_name = "Unknown K2-based";
+		pmac_mb.features = g5_features;
+		break;
+#endif /* CONFIG_POWER4 */
+	default:
+		return -ENODEV;
+	}
+found:
+#ifndef CONFIG_POWER4
+	/* Fixup Hooper vs. Comet */
+	if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
+		u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
+		if (!mach_id_ptr)
+			return -ENODEV;
+		/* Here, I used to disable the media-bay on Comet. It
+		 * appears this is wrong: the floppy connector is actually
+		 * a kind of media-bay and works with the current driver.
+		 */
+		if (__raw_readl(mach_id_ptr) & 0x20000000UL)
+			pmac_mb.model_id = PMAC_TYPE_COMET;
+		iounmap(mach_id_ptr);
+	}
+#endif /* CONFIG_POWER4 */
+
+#ifdef CONFIG_6xx
+	/* Set the default value of powersave_nap on machines that support
+	 * it. It appears that uninorth rev 3 has a problem with it, so we
+	 * don't enable it on those. In theory, the flush-on-lock property
+	 * is supposed to be set when nap is not supported, but I'm not
+	 * confident that all Apple OF revs set it properly, so do it the
+	 * paranoid way.
+	 */
+	while (uninorth_base && uninorth_rev > 3) {
+		struct device_node *np = find_path_device("/cpus");
+		if (!np || !np->child) {
+			printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
+			break;
+		}
+		np = np->child;
+		/* Nap mode not supported on SMP */
+		if (np->sibling)
+			break;
+		/* Nap mode not supported if flush-on-lock property is present */
+		if (get_property(np, "flush-on-lock", NULL))
+			break;
+		powersave_nap = 1;
+		printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
+		break;
+	}
+
+	/* On CPUs that support it (750FX), run at low speed by default
+	 * during NAP mode
+	 */
+	powersave_lowspeed = 1;
+#endif /* CONFIG_6xx */
+#ifdef CONFIG_POWER4
+	powersave_nap = 1;
+#endif
+	/* Check for "mobile" machine */
+	if (model && (strncmp(model, "PowerBook", 9) == 0
+		   || strncmp(model, "iBook", 5) == 0))
+		pmac_mb.board_flags |= PMAC_MB_MOBILE;
+
+	printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
+	return 0;
+}
+
+/* Initialize the Core99 UniNorth host bridge and memory controller
+ */
+static void __init probe_uninorth(void)
+{
+	unsigned long actrl;
+
+	/* Locate core99 Uni-N */
+	uninorth_node = of_find_node_by_name(NULL, "uni-n");
+	/* Locate G5 u3 */
+	if (uninorth_node == NULL) {
+		uninorth_node = of_find_node_by_name(NULL, "u3");
+		uninorth_u3 = 1;
+	}
+	if (uninorth_node && uninorth_node->n_addrs > 0) {
+		unsigned long address = uninorth_node->addrs[0].address;
+		uninorth_base = ioremap(address, 0x40000);
+		uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
+		if (uninorth_u3)
+			u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
+	} else
+		uninorth_node = NULL;
+
+	if (!uninorth_node)
+		return;
+
+	printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
+	       uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
+	printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
+
+	/* Set the arbiter QAck delay according to what Apple does
+	 */
+	if (uninorth_rev < 0x11) {
+		actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
+		actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
+			UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
+		UN_OUT(UNI_N_ARB_CTRL, actrl);
+	}
+
+	/* Some more magic, as done by Apple in recent MacOS X on UniNorth
+	 * revs 1.5 to 2.0 and Pangea. Seems to toggle the UniN Maxbus/PCI
+	 * memory timeout
+	 */
+	if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
+		UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
+}
+
+static void __init probe_one_macio(const char *name, const char *compat, int type)
+{
+	struct device_node*	node;
+	int			i;
+	volatile u32 __iomem *	base;
+	u32*			revp;
+
+	node = find_devices(name);
+	if (!node || !node->n_addrs)
+		return;
+	if (compat)
+		do {
+			if (device_is_compatible(node, compat))
+				break;
+			node = node->next;
+		} while (node);
+	if (!node)
+		return;
+	for(i=0; i<MAX_MACIO_CHIPS; i++) {
+		if (!macio_chips[i].of_node)
+			break;
+		if (macio_chips[i].of_node == node)
+			return;
+	}
+	if (i >= MAX_MACIO_CHIPS) {
+		printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
+		printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
+		return;
+	}
+	base = ioremap(node->addrs[0].address, node->addrs[0].size);
+	if (!base) {
+		printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
+		return;
+	}
+	if (type == macio_keylargo) {
+		u32 *did = (u32 *)get_property(node, "device-id", NULL);
+		if (*did == 0x00000025)
+			type = macio_pangea;
+		if (*did == 0x0000003e)
+			type = macio_intrepid;
+	}
+	macio_chips[i].of_node	= node;
+	macio_chips[i].type	= type;
+	macio_chips[i].base	= base;
+	macio_chips[i].flags	= MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
+	macio_chips[i].name	= macio_names[type];
+	revp = (u32 *)get_property(node, "revision-id", NULL);
+	if (revp)
+		macio_chips[i].rev = *revp;
+	printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
+		macio_names[type], macio_chips[i].rev, macio_chips[i].base);
+}
+
+static int __init
+probe_macios(void)
+{
+	/* Warning, ordering is important */
+	probe_one_macio("gc", NULL, macio_grand_central);
+	probe_one_macio("ohare", NULL, macio_ohare);
+	probe_one_macio("pci106b,7", NULL, macio_ohareII);
+	probe_one_macio("mac-io", "keylargo", macio_keylargo);
+	probe_one_macio("mac-io", "paddington", macio_paddington);
+	probe_one_macio("mac-io", "gatwick", macio_gatwick);
+	probe_one_macio("mac-io", "heathrow", macio_heathrow);
+	probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
+
+	/* Make sure the "main" macio chip appear first */
+	if (macio_chips[0].type == macio_gatwick
+	    && macio_chips[1].type == macio_heathrow) {
+		struct macio_chip temp = macio_chips[0];
+		macio_chips[0] = macio_chips[1];
+		macio_chips[1] = temp;
+	}
+	if (macio_chips[0].type == macio_ohareII
+	    && macio_chips[1].type == macio_ohare) {
+		struct macio_chip temp = macio_chips[0];
+		macio_chips[0] = macio_chips[1];
+		macio_chips[1] = temp;
+	}
+	macio_chips[0].lbus.index = 0;
+	macio_chips[1].lbus.index = 1;
+
+	return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
+}
+
+static void __init
+initial_serial_shutdown(struct device_node *np)
+{
+	int len;
+	struct slot_names_prop {
+		int	count;
+		char	name[1];
+	} *slots;
+	char *conn;
+	int port_type = PMAC_SCC_ASYNC;
+	int modem = 0;
+
+	slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
+	conn = get_property(np, "AAPL,connector", &len);
+	if (conn && (strcmp(conn, "infrared") == 0))
+		port_type = PMAC_SCC_IRDA;
+	else if (device_is_compatible(np, "cobalt"))
+		modem = 1;
+	else if (slots && slots->count > 0) {
+		if (strcmp(slots->name, "IrDA") == 0)
+			port_type = PMAC_SCC_IRDA;
+		else if (strcmp(slots->name, "Modem") == 0)
+			modem = 1;
+	}
+	if (modem)
+		pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
+	pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
+}
+
+static void __init
+set_initial_features(void)
+{
+	struct device_node *np;
+
+	/* This hack appears to be necessary for some StarMax motherboards,
+	 * but I'm not too sure it was audited for side-effects on other
+	 * ohare-based machines...
+	 * Since I still have difficulties figuring out the right way to
+	 * differentiate them all and since that hack was there for a long
+	 * time, I'll keep it around
+	 */
+	if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
+		struct macio_chip *macio = &macio_chips[0];
+		MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
+	} else if (macio_chips[0].type == macio_ohare) {
+		struct macio_chip *macio = &macio_chips[0];
+		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+	} else if (macio_chips[1].type == macio_ohare) {
+		struct macio_chip *macio = &macio_chips[1];
+		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
+	}
+
+#ifdef CONFIG_POWER4
+	if (macio_chips[0].type == macio_keylargo2) {
+#ifndef CONFIG_SMP
+		/* When running a UP kernel on an SMP machine, the second
+		 * CPU eats bus cycles and needs to be taken off the bus.
+		 * For SMP kernels booted on one CPU, this is done from
+		 * pmac_smp instead.
+		 */
+		np = of_find_node_by_type(NULL, "cpu");
+		if (np != NULL)
+			np = of_find_node_by_type(np, "cpu");
+		if (np != NULL) {
+			g5_phy_disable_cpu1();
+			of_node_put(np);
+		}
+#endif /* CONFIG_SMP */
+		/* Enable GMAC for now for PCI probing. It will be disabled
+		 * later on after PCI probe
+		 */
+		np = of_find_node_by_name(NULL, "ethernet");
+		while(np) {
+			if (device_is_compatible(np, "K2-GMAC"))
+				g5_gmac_enable(np, 0, 1);
+			np = of_find_node_by_name(np, "ethernet");
+		}
+
+		/* Enable FW before PCI probe. Will be disabled later on.
+		 * Note: we should have a better way to check that we are
+		 * dealing with the uninorth internal cell and not a PCI
+		 * cell on the external PCI bus. The code below works, though.
+		 */
+		np = of_find_node_by_name(NULL, "firewire");
+		while(np) {
+			if (device_is_compatible(np, "pci106b,5811")) {
+				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+				g5_fw_enable(np, 0, 1);
+			}
+			np = of_find_node_by_name(np, "firewire");
+		}
+	}
+#else /* CONFIG_POWER4 */
+
+	if (macio_chips[0].type == macio_keylargo ||
+	    macio_chips[0].type == macio_pangea ||
+	    macio_chips[0].type == macio_intrepid) {
+		/* Enable GMAC for now for PCI probing. It will be disabled
+		 * later on after PCI probe
+		 */
+		np = of_find_node_by_name(NULL, "ethernet");
+		while(np) {
+			if (np->parent
+			    && device_is_compatible(np->parent, "uni-north")
+			    && device_is_compatible(np, "gmac"))
+				core99_gmac_enable(np, 0, 1);
+			np = of_find_node_by_name(np, "ethernet");
+		}
+
+		/* Enable FW before PCI probe. Will be disabled later on.
+		 * Note: we should have a better way to check that we are
+		 * dealing with the uninorth internal cell and not a PCI
+		 * cell on the external PCI bus. The code below works, though.
+		 */
+		np = of_find_node_by_name(NULL, "firewire");
+		while(np) {
+			if (np->parent
+			    && device_is_compatible(np->parent, "uni-north")
+			    && (device_is_compatible(np, "pci106b,18") ||
+			        device_is_compatible(np, "pci106b,30") ||
+			        device_is_compatible(np, "pci11c1,5811"))) {
+				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
+				core99_firewire_enable(np, 0, 1);
+			}
+			np = of_find_node_by_name(np, "firewire");
+		}
+
+		/* Enable ATA-100 before PCI probe. */
+		np = of_find_node_by_name(NULL, "ata-6");
+		while(np) {
+			if (np->parent
+			    && device_is_compatible(np->parent, "uni-north")
+			    && device_is_compatible(np, "kauai-ata")) {
+				core99_ata100_enable(np, 1);
+			}
+			np = of_find_node_by_name(np, "ata-6");
+		}
+
+		/* Switch airport off */
+		np = find_devices("radio");
+		while(np) {
+			if (np && np->parent == macio_chips[0].of_node) {
+				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
+				core99_airport_enable(np, 0, 0);
+			}
+			np = np->next;
+		}
+	}
+
+	/* On all machines that support sound PM, switch sound off */
+	if (macio_chips[0].of_node)
+		pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
+			macio_chips[0].of_node, 0, 0);
+
+	/* While on some desktop G3s, we turn it back on */
+	if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
+		&& (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
+		    pmac_mb.model_id == PMAC_TYPE_SILK)) {
+		struct macio_chip *macio = &macio_chips[0];
+		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
+		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
+	}
+
+	/* Some machine models need the clock chip to be properly set up
+	 * for clock spreading now. This should be a platform function but
+	 * we don't have those at the moment
+	 */
+	pmac_tweak_clock_spreading(1);
+
+#endif /* CONFIG_POWER4 */
+
+	/* On all machines, switch modem & serial ports off */
+	np = find_devices("ch-a");
+	while(np) {
+		initial_serial_shutdown(np);
+		np = np->next;
+	}
+	np = find_devices("ch-b");
+	while(np) {
+		initial_serial_shutdown(np);
+		np = np->next;
+	}
+}
+
+void __init
+pmac_feature_init(void)
+{
+	/* Detect the UniNorth memory controller */
+	probe_uninorth();
+
+	/* Probe mac-io controllers */
+	if (probe_macios()) {
+		printk(KERN_WARNING "No mac-io chip found\n");
+		return;
+	}
+
+	/* Setup low-level i2c stuffs */
+	pmac_init_low_i2c();
+
+	/* Probe machine type */
+	if (probe_motherboard())
+		printk(KERN_WARNING "Unknown PowerMac !\n");
+
+	/* Set some initial features (turn off some chips that will
+	 * be later turned on)
+	 */
+	set_initial_features();
+}
+
+int __init pmac_feature_late_init(void)
+{
+#if 0
+	struct device_node *np;
+
+	/* Request some resources late */
+	if (uninorth_node)
+		request_OF_resource(uninorth_node, 0, NULL);
+	np = find_devices("hammerhead");
+	if (np)
+		request_OF_resource(np, 0, NULL);
+	np = find_devices("interrupt-controller");
+	if (np)
+		request_OF_resource(np, 0, NULL);
+#endif
+	return 0;
+}
+
+device_initcall(pmac_feature_late_init);
+
+#if 0
+static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
+{
+	int	freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
+	int	bits[8] = { 8,16,0,32,2,4,0,0 };
+	int	freq = (frq >> 8) & 0xf;
+
+	if (freqs[freq] == 0)
+		printk("%s: Unknown HT link frequency %x\n", name, freq);
+	else
+		printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
+		       name, freqs[freq],
+		       bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
+}
+
+void __init pmac_check_ht_link(void)
+{
+#if 0 /* Disabled for now */
+	u32	ufreq, freq, ucfg, cfg;
+	struct device_node *pcix_node;
+	u8	px_bus, px_devfn;
+	struct pci_controller *px_hose;
+
+	(void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
+	ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
+	ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
+	dump_HT_speeds("U3 HyperTransport", cfg, freq);
+
+	pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
+	if (pcix_node == NULL) {
+		printk("No PCI-X bridge found\n");
+		return;
+	}
+	if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
+		printk("PCI-X bridge found but not matched to pci\n");
+		return;
+	}
+	px_hose = pci_find_hose_for_OF_device(pcix_node);
+	if (px_hose == NULL) {
+		printk("PCI-X bridge found but not matched to host\n");
+		return;
+	}	
+	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
+	early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
+	dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
+	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
+	early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
+	dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
+#endif
+}
+
+#endif /* 0 */
+
+/*
+ * Early video resume hook
+ */
+
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
+
+void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
+{
+	if (_machine != _MACH_Pmac)
+		return;
+	preempt_disable();
+	pmac_early_vresume_proc = proc;
+	pmac_early_vresume_data = data;
+	preempt_enable();
+}
+EXPORT_SYMBOL(pmac_set_early_video_resume);
+
+void pmac_call_early_video_resume(void)
+{
+	if (pmac_early_vresume_proc)
+		pmac_early_vresume_proc(pmac_early_vresume_data);
+}
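+
+/* Example (illustrative sketch): a framebuffer driver would register
+ * its hook once at init time, e.g.
+ *
+ *	pmac_set_early_video_resume(my_fb_early_resume, my_fb_par);
+ *
+ * my_fb_early_resume/my_fb_par are hypothetical driver symbols; the
+ * callback then runs via pmac_call_early_video_resume() early in the
+ * wakeup path, before normal device resume.
+ */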
+
+/*
+ * AGP related suspend/resume code
+ */
+
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
+
+void pmac_register_agp_pm(struct pci_dev *bridge,
+				 int (*suspend)(struct pci_dev *bridge),
+				 int (*resume)(struct pci_dev *bridge))
+{
+	if (suspend || resume) {
+		pmac_agp_bridge = bridge;
+		pmac_agp_suspend = suspend;
+		pmac_agp_resume = resume;
+		return;
+	}
+	if (bridge != pmac_agp_bridge)
+		return;
+	pmac_agp_suspend = pmac_agp_resume = NULL;
+	return;
+}
+EXPORT_SYMBOL(pmac_register_agp_pm);
+
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
+{
+	if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
+		return;
+	if (pmac_agp_bridge->bus != dev->bus)
+		return;
+	pmac_agp_suspend(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_suspend_agp_for_card);
+
+void pmac_resume_agp_for_card(struct pci_dev *dev)
+{
+	if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
+		return;
+	if (pmac_agp_bridge->bus != dev->bus)
+		return;
+	pmac_agp_resume(pmac_agp_bridge);
+}
+EXPORT_SYMBOL(pmac_resume_agp_for_card);
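+
+/* Example (illustrative sketch): an AGP bridge driver registers its
+ * PM callbacks at probe time and unregisters by passing NULLs:
+ *
+ *	pmac_register_agp_pm(pdev, my_agp_suspend, my_agp_resume);
+ *	...
+ *	pmac_register_agp_pm(pdev, NULL, NULL);
+ *
+ * my_agp_suspend/my_agp_resume are hypothetical functions with the
+ * int (*)(struct pci_dev *) signature above. A graphics driver on the
+ * same bus can then call pmac_suspend_agp_for_card(its_pdev) before
+ * powering the card down.
+ */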
diff --git a/arch/powerpc/platforms/powermac/pmac_low_i2c.c b/arch/powerpc/platforms/powermac/pmac_low_i2c.c
new file mode 100644
index 0000000..f3f39e8
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_low_i2c.c
@@ -0,0 +1,523 @@
+/*
+ *  arch/ppc/platforms/pmac_low_i2c.c
+ *
+ *  Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  This file contains some low-level i2c access routines that
+ *  need to be used by various bits of the PowerMac platform code
+ *  at times when the real asynchronous, interrupt-driven driver
+ *  cannot be used. The API borrows some semantics from the Darwin
+ *  driver in order to ease the implementation of the platform
+ *  properties parser.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <asm/keylargo.h>
+#include <asm/uninorth.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_low_i2c.h>
+
+#define MAX_LOW_I2C_HOST	4
+
+#ifdef DEBUG
+#define DBG(x...) do {\
+		printk(KERN_DEBUG "KW:" x);	\
+	} while(0)
+#else
+#define DBG(x...)
+#endif
+
+struct low_i2c_host;
+
+typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len);
+
+struct low_i2c_host
+{
+	struct device_node	*np;		/* OF device node */
+	struct semaphore	mutex;		/* Access mutex for use by i2c-keywest */
+	low_i2c_func_t		func;		/* Access function */
+	unsigned int		is_open : 1;	/* Poor man's access control */
+	int			mode;		/* Current mode */
+	int			channel;	/* Current channel */
+	int			num_channels;	/* Number of channels */
+	void __iomem		*base;		/* For keywest-i2c, base address */
+	int			bsteps;		/* And register stepping */
+	int			speed;		/* And speed */
+};
+
+static struct low_i2c_host	low_i2c_hosts[MAX_LOW_I2C_HOST];
+
+/* No locking is necessary on allocation; we are running way before
+ * anything can race with us
+ */
+static struct low_i2c_host *find_low_i2c_host(struct device_node *np)
+{
+	int i;
+
+	for (i = 0; i < MAX_LOW_I2C_HOST; i++)
+		if (low_i2c_hosts[i].np == np)
+			return &low_i2c_hosts[i];
+	return NULL;
+}
+
+/*
+ *
+ * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's)
+ *
+ */
+
+/*
+ * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h;
+ * they should be moved somewhere under include/asm-ppc/
+ */
+/* Register indices */
+typedef enum {
+	reg_mode = 0,
+	reg_control,
+	reg_status,
+	reg_isr,
+	reg_ier,
+	reg_addr,
+	reg_subaddr,
+	reg_data
+} reg_t;
+
+
+/* Mode register */
+#define KW_I2C_MODE_100KHZ	0x00
+#define KW_I2C_MODE_50KHZ	0x01
+#define KW_I2C_MODE_25KHZ	0x02
+#define KW_I2C_MODE_DUMB	0x00
+#define KW_I2C_MODE_STANDARD	0x04
+#define KW_I2C_MODE_STANDARDSUB	0x08
+#define KW_I2C_MODE_COMBINED	0x0C
+#define KW_I2C_MODE_MODE_MASK	0x0C
+#define KW_I2C_MODE_CHAN_MASK	0xF0
+
+/* Control register */
+#define KW_I2C_CTL_AAK		0x01
+#define KW_I2C_CTL_XADDR	0x02
+#define KW_I2C_CTL_STOP		0x04
+#define KW_I2C_CTL_START	0x08
+
+/* Status register */
+#define KW_I2C_STAT_BUSY	0x01
+#define KW_I2C_STAT_LAST_AAK	0x02
+#define KW_I2C_STAT_LAST_RW	0x04
+#define KW_I2C_STAT_SDA		0x08
+#define KW_I2C_STAT_SCL		0x10
+
+/* IER & ISR registers */
+#define KW_I2C_IRQ_DATA		0x01
+#define KW_I2C_IRQ_ADDR		0x02
+#define KW_I2C_IRQ_STOP		0x04
+#define KW_I2C_IRQ_START	0x08
+#define KW_I2C_IRQ_MASK		0x0F
+
+/* State machine states */
+enum {
+	state_idle,
+	state_addr,
+	state_read,
+	state_write,
+	state_stop,
+	state_dead
+};
+
+#define WRONG_STATE(name) do {\
+		printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \
+		       name, __kw_state_names[state], isr); \
+	} while(0)
+
+static const char *__kw_state_names[] = {
+	"state_idle",
+	"state_addr",
+	"state_read",
+	"state_write",
+	"state_stop",
+	"state_dead"
+};
+
+static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg)
+{
+	return readb(host->base + (((unsigned int)reg) << host->bsteps));
+}
+
+static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val)
+{
+	writeb(val, host->base + (((unsigned)reg) << host->bsteps));
+	(void)__kw_read_reg(host, reg_subaddr);
+}
+
+#define kw_write_reg(reg, val)	__kw_write_reg(host, reg, val) 
+#define kw_read_reg(reg)	__kw_read_reg(host, reg) 
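+
+/* Worked example (illustrative): with the default AAPL,address-step
+ * of 0x10, bsteps is computed as 4 (see keywest_low_i2c_add below),
+ * so reg_status (index 2) lives at base + (2 << 4) = base + 0x20.
+ * A step of 0x4 would give bsteps = 2 and base + 0x08 instead.
+ */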
+
+
+/* Don't schedule; the G5 fan controller is too
+ * timing-sensitive
+ */
+static u8 kw_wait_interrupt(struct low_i2c_host* host)
+{
+	int i, j;
+	u8 isr;
+	
+	for (i = 0; i < 100000; i++) {
+		isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
+		if (isr != 0)
+			return isr;
+
+		/* This code is used with the timebase frozen, so we cannot
+		 * rely on udelay! For now, just use a bogus busy loop
+		 */
+		for (j = 1; j < 10000; j++)
+			mb();
+	}
+	return isr;
+}
+
+static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr)
+{
+	u8 ack;
+
+	DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr);
+
+	if (isr == 0) {
+		if (state != state_stop) {
+			DBG("KW: Timeout !\n");
+			*rc = -EIO;
+			goto stop;
+		}
+		/* state == state_stop: wait for the bus to go idle */
+		ack = kw_read_reg(reg_status);
+		if (!(ack & KW_I2C_STAT_BUSY)) {
+			state = state_idle;
+			kw_write_reg(reg_ier, 0x00);
+		}
+		return state;
+	}
+
+	if (isr & KW_I2C_IRQ_ADDR) {
+		ack = kw_read_reg(reg_status);
+		if (state != state_addr) {
+			kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
+			WRONG_STATE("KW_I2C_IRQ_ADDR"); 
+			*rc = -EIO;
+			goto stop;
+		}
+		if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {			
+			*rc = -ENODEV;
+			DBG("KW: NAK on address\n");
+			return state_stop;		     
+		} else {
+			if (rw) {
+				state = state_read;
+				if (*len > 1)
+					kw_write_reg(reg_control, KW_I2C_CTL_AAK);
+			} else {
+				state = state_write;
+				kw_write_reg(reg_data, **data);
+				(*data)++; (*len)--;
+			}
+		}
+		kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
+	}
+
+	if (isr & KW_I2C_IRQ_DATA) {
+		if (state == state_read) {
+			**data = kw_read_reg(reg_data);
+			(*data)++; (*len)--;
+			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
+			if ((*len) == 0)
+				state = state_stop;
+			else if ((*len) == 1)
+				kw_write_reg(reg_control, 0);
+		} else if (state == state_write) {
+			ack = kw_read_reg(reg_status);
+			if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
+				DBG("KW: nack on data write\n");
+				*rc = -EIO;
+				goto stop;
+			} else if (*len) {
+				kw_write_reg(reg_data, **data);
+				(*data)++; (*len)--;
+			} else {
+				kw_write_reg(reg_control, KW_I2C_CTL_STOP);
+				state = state_stop;
+				*rc = 0;
+			}
+			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
+		} else {
+			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
+			WRONG_STATE("KW_I2C_IRQ_DATA"); 
+			if (state != state_stop) {
+				*rc = -EIO;
+				goto stop;
+			}
+		}
+	}
+
+	if (isr & KW_I2C_IRQ_STOP) {
+		kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
+		if (state != state_stop) {
+			WRONG_STATE("KW_I2C_IRQ_STOP");
+			*rc = -EIO;
+		}
+		return state_idle;
+	}
+
+	if (isr & KW_I2C_IRQ_START)
+		kw_write_reg(reg_isr, KW_I2C_IRQ_START);
+
+	return state;
+
+ stop:
+	kw_write_reg(reg_control, KW_I2C_CTL_STOP);	
+	return state_stop;
+}
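+
+/* Transition summary for the handler above (informative):
+ *
+ *	state_addr  -- ADDR irq, ack  --> state_read or state_write
+ *	state_addr  -- ADDR irq, nak  --> state_stop  (*rc = -ENODEV)
+ *	state_read  -- DATA irq, len exhausted --> state_stop
+ *	state_write -- DATA irq, len exhausted --> state_stop (STOP sent)
+ *	state_stop  -- STOP irq       --> state_idle
+ *
+ * Unexpected ADDR/DATA interrupts force a STOP on the bus and land
+ * in state_stop with *rc = -EIO.
+ */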
+
+static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len)
+{
+	u8 mode_reg = host->speed;
+	int state = state_addr;
+	int rc = 0;
+
+	/* Setup mode & subaddress if any */
+	switch(host->mode) {
+	case pmac_low_i2c_mode_dumb:
+		printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
+		return -EINVAL;
+	case pmac_low_i2c_mode_std:
+		mode_reg |= KW_I2C_MODE_STANDARD;
+		break;
+	case pmac_low_i2c_mode_stdsub:
+		mode_reg |= KW_I2C_MODE_STANDARDSUB;
+		break;
+	case pmac_low_i2c_mode_combined:
+		mode_reg |= KW_I2C_MODE_COMBINED;
+		break;
+	}
+
+	/* Setup channel & clear pending irqs */
+	kw_write_reg(reg_isr, kw_read_reg(reg_isr));
+	kw_write_reg(reg_mode, mode_reg | (host->channel << 4));
+	kw_write_reg(reg_status, 0);
+
+	/* Set up address and r/w bit */
+	kw_write_reg(reg_addr, addr);
+
+	/* Set up the sub address */
+	if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
+	    || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
+		kw_write_reg(reg_subaddr, subaddr);
+
+	/* Start sending address & disable interrupt*/
+	kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/);
+	kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
+
+	/* State machine; eventually to be turned into an interrupt handler */
+	while(state != state_idle) {
+		u8 isr = kw_wait_interrupt(host);
+		state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr);
+	}
+
+	return rc;
+}
+
+static void keywest_low_i2c_add(struct device_node *np)
+{
+	struct low_i2c_host	*host = find_low_i2c_host(NULL);
+	u32			*psteps, *prate, steps, aoffset = 0;
+	struct device_node	*parent;
+
+	if (host == NULL) {
+		printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
+		       np->full_name);
+		return;
+	}
+	memset(host, 0, sizeof(*host));
+
+	init_MUTEX(&host->mutex);
+	host->np = of_node_get(np);	
+	psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
+	steps = psteps ? (*psteps) : 0x10;
+	for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
+		steps >>= 1;
+	parent = of_get_parent(np);
+	host->num_channels = 1;
+	if (parent && parent->name[0] == 'u') {
+		host->num_channels = 2;
+		aoffset = 3;
+	}
+	/* Select interface rate */
+	host->speed = KW_I2C_MODE_100KHZ;
+	prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
+	if (prate) switch(*prate) {
+	case 100:
+		host->speed = KW_I2C_MODE_100KHZ;
+		break;
+	case 50:
+		host->speed = KW_I2C_MODE_50KHZ;
+		break;
+	case 25:
+		host->speed = KW_I2C_MODE_25KHZ;
+		break;
+	}	
+
+	host->mode = pmac_low_i2c_mode_std;
+	host->base = ioremap(np->addrs[0].address + aoffset,
+						np->addrs[0].size);
+	host->func = keywest_low_i2c_func;
+}
+
+/*
+ *
+ * PMU implementation
+ *
+ */
+
+
+#ifdef CONFIG_ADB_PMU
+
+static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len)
+{
+	// TODO
+	return -ENODEV;
+}
+
+static void pmu_low_i2c_add(struct device_node *np)
+{
+	struct low_i2c_host	*host = find_low_i2c_host(NULL);
+
+	if (host == NULL) {
+		printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
+		       np->full_name);
+		return;
+	}
+	memset(host, 0, sizeof(*host));
+
+	init_MUTEX(&host->mutex);
+	host->np = of_node_get(np);	
+	host->num_channels = 3;
+	host->mode = pmac_low_i2c_mode_std;
+	host->func = pmu_low_i2c_func;
+}
+
+#endif /* CONFIG_ADB_PMU */
+
+void __init pmac_init_low_i2c(void)
+{
+	struct device_node *np;
+
+	/* Probe keywest-i2c busses */
+	np = of_find_compatible_node(NULL, "i2c", "keywest-i2c");
+	while(np) {
+		keywest_low_i2c_add(np);
+		np = of_find_compatible_node(np, "i2c", "keywest-i2c");
+	}
+
+#ifdef CONFIG_ADB_PMU
+	/* Probe PMU busses */
+	np = of_find_node_by_name(NULL, "via-pmu");
+	if (np)
+		pmu_low_i2c_add(np);
+#endif /* CONFIG_ADB_PMU */
+
+	/* TODO: Add CUDA support as well */
+}
+
+int pmac_low_i2c_lock(struct device_node *np)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+	down(&host->mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pmac_low_i2c_lock);
+
+int pmac_low_i2c_unlock(struct device_node *np)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+	up(&host->mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pmac_low_i2c_unlock);
+
+
+int pmac_low_i2c_open(struct device_node *np, int channel)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+
+	if (channel >= host->num_channels)
+		return -EINVAL;
+
+	down(&host->mutex);
+	host->is_open = 1;
+	host->channel = channel;
+
+	return 0;
+}
+EXPORT_SYMBOL(pmac_low_i2c_open);
+
+int pmac_low_i2c_close(struct device_node *np)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+
+	host->is_open = 0;
+	up(&host->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(pmac_low_i2c_close);
+
+int pmac_low_i2c_setmode(struct device_node *np, int mode)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+	WARN_ON(!host->is_open);
+	host->mode = mode;
+
+	return 0;
+}
+EXPORT_SYMBOL(pmac_low_i2c_setmode);
+
+int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len)
+{
+	struct low_i2c_host *host = find_low_i2c_host(np);
+
+	if (!host)
+		return -ENODEV;
+	WARN_ON(!host->is_open);
+
+	return host->func(host, addrdir, subaddr, data, len);
+}
+EXPORT_SYMBOL(pmac_low_i2c_xfer);
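+
+/* Example (illustrative sketch): reading one byte at i2c address 0x52,
+ * sub-address 0x01, in combined mode. The low bit of addrdir is the
+ * i2c direction (1 = read), as used by the keywest state machine:
+ *
+ *	u8 buf;
+ *	int rc = -ENODEV;
+ *	if (pmac_low_i2c_open(np, 0) == 0) {
+ *		pmac_low_i2c_setmode(np, pmac_low_i2c_mode_combined);
+ *		rc = pmac_low_i2c_xfer(np, (0x52 << 1) | 1, 0x01, &buf, 1);
+ *		pmac_low_i2c_close(np);
+ *	}
+ *
+ * The 0x52/0x01 values are made up for the example; np must be one of
+ * the keywest-i2c (or via-pmu) nodes probed by pmac_init_low_i2c().
+ */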
+
diff --git a/arch/powerpc/platforms/powermac/pmac_nvram.c b/arch/powerpc/platforms/powermac/pmac_nvram.c
new file mode 100644
index 0000000..8c9b008
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_nvram.c
@@ -0,0 +1,584 @@
+/*
+ *  arch/ppc/platforms/pmac_nvram.c
+ *
+ *  Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Todo: - add support for the OF persistent properties
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/nvram.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/bootmem.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/nvram.h>
+
+#define DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#define NVRAM_SIZE		0x2000	/* 8kB of non-volatile RAM */
+
+#define CORE99_SIGNATURE	0x5a
+#define CORE99_ADLER_START	0x14
+
+/* On Core99, nvram is either a Sharp, a Micron or an AMD flash */
+#define SM_FLASH_STATUS_DONE	0x80
+#define SM_FLASH_STATUS_ERR		0x38
+#define SM_FLASH_CMD_ERASE_CONFIRM	0xd0
+#define SM_FLASH_CMD_ERASE_SETUP	0x20
+#define SM_FLASH_CMD_RESET		0xff
+#define SM_FLASH_CMD_WRITE_SETUP	0x40
+#define SM_FLASH_CMD_CLEAR_STATUS	0x50
+#define SM_FLASH_CMD_READ_STATUS	0x70
+
+/* CHRP NVRAM header */
+struct chrp_header {
+  u8		signature;
+  u8		cksum;
+  u16		len;
+  char          name[12];
+  u8		data[0];
+};
+
+struct core99_header {
+  struct chrp_header	hdr;
+  u32			adler;
+  u32			generation;
+  u32			reserved[2];
+};
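+
+/* Layout sketch (informative): the Core99 flash holds two NVRAM_SIZE
+ * banks; core99_check() below compares their generation counters to
+ * pick the newest copy at boot. Within a bank, CHRP partitions follow
+ * one another, each prefixed by a chrp_header with len counted in
+ * 16-byte units:
+ *
+ *	[core99_header][partition "common"][partition "APL,MacOS75"]...
+ */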
+
+/*
+ * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
+ */
+static int nvram_naddrs;
+static volatile unsigned char *nvram_addr;
+static volatile unsigned char *nvram_data;
+static int nvram_mult, is_core_99;
+static int core99_bank = 0;
+static int nvram_partitions[3];
+static DEFINE_SPINLOCK(nv_lock);
+
+extern int pmac_newworld;
+extern int system_running;
+
+static int (*core99_write_bank)(int bank, u8* datas);
+static int (*core99_erase_bank)(int bank);
+
+static char *nvram_image;
+
+
+static unsigned char core99_nvram_read_byte(int addr)
+{
+	if (nvram_image == NULL)
+		return 0xff;
+	return nvram_image[addr];
+}
+
+static void core99_nvram_write_byte(int addr, unsigned char val)
+{
+	if (nvram_image == NULL)
+		return;
+	nvram_image[addr] = val;
+}
+
+
+static unsigned char direct_nvram_read_byte(int addr)
+{
+	return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
+}
+
+static void direct_nvram_write_byte(int addr, unsigned char val)
+{
+	out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
+}
+
+
+static unsigned char indirect_nvram_read_byte(int addr)
+{
+	unsigned char val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nv_lock, flags);
+	out_8(nvram_addr, addr >> 5);
+	val = in_8(&nvram_data[(addr & 0x1f) << 4]);
+	spin_unlock_irqrestore(&nv_lock, flags);
+
+	return val;
+}
+
+static void indirect_nvram_write_byte(int addr, unsigned char val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nv_lock, flags);
+	out_8(nvram_addr, addr >> 5);
+	out_8(&nvram_data[(addr & 0x1f) << 4], val);
+	spin_unlock_irqrestore(&nv_lock, flags);
+}
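+
+/* Worked example (illustrative): the indirect scheme splits the NVRAM
+ * address into a 32-byte page and an offset. For addr = 0x1234:
+ *
+ *	page   = 0x1234 >> 5          = 0x91   (written to nvram_addr)
+ *	offset = (0x1234 & 0x1f) << 4 = 0x140  (index into nvram_data)
+ *
+ * i.e. consecutive NVRAM bytes are spaced 16 bytes apart in the data
+ * window.
+ */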
+
+
+#ifdef CONFIG_ADB_PMU
+
+static void pmu_nvram_complete(struct adb_request *req)
+{
+	if (req->arg)
+		complete((struct completion *)req->arg);
+}
+
+static unsigned char pmu_nvram_read_byte(int addr)
+{
+	struct adb_request req;
+	DECLARE_COMPLETION(req_complete); 
+	
+	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+	if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
+			(addr >> 8) & 0xff, addr & 0xff))
+		return 0xff;
+	if (system_state == SYSTEM_RUNNING)
+		wait_for_completion(&req_complete);
+	while (!req.complete)
+		pmu_poll();
+	return req.reply[0];
+}
+
+static void pmu_nvram_write_byte(int addr, unsigned char val)
+{
+	struct adb_request req;
+	DECLARE_COMPLETION(req_complete); 
+	
+	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
+	if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
+			(addr >> 8) & 0xff, addr & 0xff, val))
+		return;
+	if (system_state == SYSTEM_RUNNING)
+		wait_for_completion(&req_complete);
+	while (!req.complete)
+		pmu_poll();
+}
+
+#endif /* CONFIG_ADB_PMU */
+
+
+static u8 chrp_checksum(struct chrp_header* hdr)
+{
+	u8 *ptr;
+	u16 sum = hdr->signature;
+	for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
+		sum += *ptr;
+	while (sum > 0xFF)
+		sum = (sum & 0xFF) + (sum>>8);
+	return sum;
+}
+
+static u32 core99_calc_adler(u8 *buffer)
+{
+	int cnt;
+	u32 low, high;
+
+   	buffer += CORE99_ADLER_START;
+	low = 1;
+	high = 0;
+	for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
+		if ((cnt % 5000) == 0) {
+			low  %= 65521UL;
+			high %= 65521UL;
+		}
+		low += buffer[cnt];
+		high += low;
+	}
+	low  %= 65521UL;
+	high %= 65521UL;
+
+	return (high << 16) | low;
+}
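+
+/* Note (informative): this is the standard Adler-32 over the image
+ * past CORE99_ADLER_START; 65521 is the largest prime below 2^16.
+ * The modulo is deferred and applied only every 5000 bytes (and once
+ * at the end) since the 32-bit sums cannot overflow that quickly.
+ */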
+
+static u32 core99_check(u8* datas)
+{
+	struct core99_header* hdr99 = (struct core99_header*)datas;
+
+	if (hdr99->hdr.signature != CORE99_SIGNATURE) {
+		DBG("Invalid signature\n");
+		return 0;
+	}
+	if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
+		DBG("Invalid checksum\n");
+		return 0;
+	}
+	if (hdr99->adler != core99_calc_adler(datas)) {
+		DBG("Invalid adler\n");
+		return 0;
+	}
+	return hdr99->generation;
+}
+
+static int sm_erase_bank(int bank)
+{
+	int stat, i;
+	unsigned long timeout;
+
+	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
+
+       	DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
+
+	out_8(base, SM_FLASH_CMD_ERASE_SETUP);
+	out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
+	timeout = 0;
+	do {
+		if (++timeout > 1000000) {
+			printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n");
+			break;
+		}
+		out_8(base, SM_FLASH_CMD_READ_STATUS);
+		stat = in_8(base);
+	} while (!(stat & SM_FLASH_STATUS_DONE));
+
+	out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
+	out_8(base, SM_FLASH_CMD_RESET);
+
+	for (i=0; i<NVRAM_SIZE; i++)
+		if (base[i] != 0xff) {
+			printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
+			return -ENXIO;
+		}
+	return 0;
+}
+
+static int sm_write_bank(int bank, u8* datas)
+{
+	int i, stat = 0;
+	unsigned long timeout;
+
+	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
+
+       	DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
+
+	for (i=0; i<NVRAM_SIZE; i++) {
+		out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
+		udelay(1);
+		out_8(base+i, datas[i]);
+		timeout = 0;
+		do {
+			if (++timeout > 1000000) {
+				printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
+				break;
+			}
+			out_8(base, SM_FLASH_CMD_READ_STATUS);
+			stat = in_8(base);
+		} while (!(stat & SM_FLASH_STATUS_DONE));
+		if (!(stat & SM_FLASH_STATUS_DONE))
+			break;
+	}
+	out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
+	out_8(base, SM_FLASH_CMD_RESET);
+	for (i=0; i<NVRAM_SIZE; i++)
+		if (base[i] != datas[i]) {
+			printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
+			return -ENXIO;
+		}
+	return 0;
+}
+
+static int amd_erase_bank(int bank)
+{
+	int i, stat = 0;
+	unsigned long timeout;
+
+	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
+
+       	DBG("nvram: AMD Erasing bank %d...\n", bank);
+
+	/* Unlock 1 */
+	out_8(base+0x555, 0xaa);
+	udelay(1);
+	/* Unlock 2 */
+	out_8(base+0x2aa, 0x55);
+	udelay(1);
+
+	/* Sector-Erase */
+	out_8(base+0x555, 0x80);
+	udelay(1);
+	out_8(base+0x555, 0xaa);
+	udelay(1);
+	out_8(base+0x2aa, 0x55);
+	udelay(1);
+	out_8(base, 0x30);
+	udelay(1);
+
+	timeout = 0;
+	do {
+		if (++timeout > 1000000) {
+			printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
+			break;
+		}
+		stat = in_8(base) ^ in_8(base);
+	} while (stat != 0);
+	
+	/* Reset */
+	out_8(base, 0xf0);
+	udelay(1);
+	
+	for (i=0; i<NVRAM_SIZE; i++)
+		if (base[i] != 0xff) {
+			printk(KERN_ERR "nvram: AMD flash erase failed !\n");
+			return -ENXIO;
+		}
+	return 0;
+}
+
+static int amd_write_bank(int bank, u8* datas)
+{
+	int i, stat = 0;
+	unsigned long timeout;
+
+	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;
+
+       	DBG("nvram: AMD Writing bank %d...\n", bank);
+
+	for (i=0; i<NVRAM_SIZE; i++) {
+		/* Unlock 1 */
+		out_8(base+0x555, 0xaa);
+		udelay(1);
+		/* Unlock 2 */
+		out_8(base+0x2aa, 0x55);
+		udelay(1);
+
+		/* Write a single byte */
+		out_8(base+0x555, 0xa0);
+		udelay(1);
+		out_8(base+i, datas[i]);
+		
+		timeout = 0;
+		do {
+			if (++timeout > 1000000) {
+				printk(KERN_ERR "nvram: AMD flash write timeout !\n");
+				break;
+			}
+			stat = in_8(base) ^ in_8(base);
+		} while (stat != 0);
+		if (stat != 0)
+			break;
+	}
+
+	/* Reset */
+	out_8(base, 0xf0);
+	udelay(1);
+
+	for (i=0; i<NVRAM_SIZE; i++)
+		if (base[i] != datas[i]) {
+			printk(KERN_ERR "nvram: AMD flash write failed !\n");
+			return -ENXIO;
+		}
+	return 0;
+}
+
+static void __init lookup_partitions(void)
+{
+	u8 buffer[17];
+	int i, offset;
+	struct chrp_header* hdr;
+
+	if (pmac_newworld) {
+		nvram_partitions[pmac_nvram_OF] = -1;
+		nvram_partitions[pmac_nvram_XPRAM] = -1;
+		nvram_partitions[pmac_nvram_NR] = -1;
+		hdr = (struct chrp_header *)buffer;
+
+		offset = 0;
+		buffer[16] = 0;
+		do {
+			for (i=0;i<16;i++)
+				buffer[i] = nvram_read_byte(offset+i);
+			if (!strcmp(hdr->name, "common"))
+				nvram_partitions[pmac_nvram_OF] = offset + 0x10;
+			if (!strcmp(hdr->name, "APL,MacOS75")) {
+				nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
+				nvram_partitions[pmac_nvram_NR] = offset + 0x110;
+			}
+			offset += (hdr->len * 0x10);
+		} while(offset < NVRAM_SIZE);
+	} else {
+		nvram_partitions[pmac_nvram_OF] = 0x1800;
+		nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
+		nvram_partitions[pmac_nvram_NR] = 0x1400;
+	}
+	DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
+	DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
+	DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
+}
+
+static void core99_nvram_sync(void)
+{
+	struct core99_header* hdr99;
+	unsigned long flags;
+
+	if (!is_core_99 || !nvram_data || !nvram_image)
+		return;
+
+	spin_lock_irqsave(&nv_lock, flags);
+	if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
+		NVRAM_SIZE))
+		goto bail;
+
+	DBG("Updating nvram...\n");
+
+	hdr99 = (struct core99_header*)nvram_image;
+	hdr99->generation++;
+	hdr99->hdr.signature = CORE99_SIGNATURE;
+	hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
+	hdr99->adler = core99_calc_adler(nvram_image);
+	core99_bank = core99_bank ? 0 : 1;
+	if (core99_erase_bank)
+		if (core99_erase_bank(core99_bank)) {
+			printk("nvram: Error erasing bank %d\n", core99_bank);
+			goto bail;
+		}
+	if (core99_write_bank)
+		if (core99_write_bank(core99_bank, nvram_image))
+			printk("nvram: Error writing bank %d\n", core99_bank);
+ bail:
+	spin_unlock_irqrestore(&nv_lock, flags);
+
+#ifdef DEBUG
+       	mdelay(2000);
+#endif
+}
+
+void __init pmac_nvram_init(void)
+{
+	struct device_node *dp;
+
+	nvram_naddrs = 0;
+
+	dp = find_devices("nvram");
+	if (dp == NULL) {
+		printk(KERN_ERR "Can't find NVRAM device\n");
+		return;
+	}
+	nvram_naddrs = dp->n_addrs;
+	is_core_99 = device_is_compatible(dp, "nvram,flash");
+	if (is_core_99) {
+		int i;
+		u32 gen_bank0, gen_bank1;
+
+		if (nvram_naddrs < 1) {
+			printk(KERN_ERR "nvram: no address\n");
+			return;
+		}
+		nvram_image = alloc_bootmem(NVRAM_SIZE);
+		if (nvram_image == NULL) {
+			printk(KERN_ERR "nvram: can't allocate ram image\n");
+			return;
+		}
+		nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
+		nvram_naddrs = 1; /* Make sure we get the correct case */
+
+		DBG("nvram: Checking bank 0...\n");
+
+		gen_bank0 = core99_check((u8 *)nvram_data);
+		gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
+		core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
+
+		DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
+		DBG("nvram: Active bank is: %d\n", core99_bank);
+
+		for (i=0; i<NVRAM_SIZE; i++)
+			nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
+
+		ppc_md.nvram_read_val	= core99_nvram_read_byte;
+		ppc_md.nvram_write_val	= core99_nvram_write_byte;
+		ppc_md.nvram_sync	= core99_nvram_sync;
+		/*
+		 * Maybe we could be smarter here, though making an exhaustive list
+		 * of known flash chips is a bit nasty as older OF didn't provide us
+		 * with a useful "compatible" entry. A solution would be to really
+		 * identify the chip using flash ID commands and match against
+		 * a list of known chip IDs.
+		 */
+		if (device_is_compatible(dp, "amd-0137")) {
+			core99_erase_bank = amd_erase_bank;
+			core99_write_bank = amd_write_bank;
+		} else {
+			core99_erase_bank = sm_erase_bank;
+			core99_write_bank = sm_write_bank;
+		}
+	} else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
+		nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
+				     dp->addrs[0].size);
+		nvram_mult = 1;
+		ppc_md.nvram_read_val	= direct_nvram_read_byte;
+		ppc_md.nvram_write_val	= direct_nvram_write_byte;
+	} else if (nvram_naddrs == 1) {
+		nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+		nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
+		ppc_md.nvram_read_val	= direct_nvram_read_byte;
+		ppc_md.nvram_write_val	= direct_nvram_write_byte;
+	} else if (nvram_naddrs == 2) {
+		nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
+		nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
+		ppc_md.nvram_read_val	= indirect_nvram_read_byte;
+		ppc_md.nvram_write_val	= indirect_nvram_write_byte;
+	} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
+#ifdef CONFIG_ADB_PMU
+		nvram_naddrs = -1;
+		ppc_md.nvram_read_val	= pmu_nvram_read_byte;
+		ppc_md.nvram_write_val	= pmu_nvram_write_byte;
+#endif /* CONFIG_ADB_PMU */
+	} else {
+		printk(KERN_ERR "Don't know how to access NVRAM with %d addresses\n",
+		       nvram_naddrs);
+	}
+	lookup_partitions();
+}
+
+int pmac_get_partition(int partition)
+{
+	return nvram_partitions[partition];
+}
+
+u8 pmac_xpram_read(int xpaddr)
+{
+	int offset = nvram_partitions[pmac_nvram_XPRAM];
+
+	if (offset < 0)
+		return 0xff;
+
+	return ppc_md.nvram_read_val(xpaddr + offset);
+}
+
+void pmac_xpram_write(int xpaddr, u8 data)
+{
+	int offset = nvram_partitions[pmac_nvram_XPRAM];
+
+	if (offset < 0)
+		return;
+
+	ppc_md.nvram_write_val(xpaddr + offset, data);
+}
+
+EXPORT_SYMBOL(pmac_get_partition);
+EXPORT_SYMBOL(pmac_xpram_read);
+EXPORT_SYMBOL(pmac_xpram_write);
diff --git a/arch/powerpc/platforms/powermac/pmac_pci.c b/arch/powerpc/platforms/powermac/pmac_pci.c
new file mode 100644
index 0000000..40bcd3e
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_pci.c
@@ -0,0 +1,1341 @@
+/*
+ * Support for PCI bridges found on Power Macintoshes.
+ * At present the "bandit" and "chaos" bridges are supported.
+ * Fortunately you access configuration space in the same
+ * way with either bridge.
+ *
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+static int add_bridge(struct device_node *dev);
+extern void pmac_check_ht_link(void);
+
+/* XXX Could be per-controller, but I don't think we risk anything by
+ * assuming we won't have both UniNorth and Bandit */
+static int has_uninorth;
+#ifdef CONFIG_POWER4
+static struct pci_controller *u3_agp;
+#endif /* CONFIG_POWER4 */
+
+extern u8 pci_cache_line_size;
+extern int pcibios_assign_bus_offset;
+
+struct device_node *k2_skiplist[2];
+
+/*
+ * Magic constants for enabling cache coherency in the bandit/PSX bridge.
+ */
+#define BANDIT_DEVID_2	8
+#define BANDIT_REVID	3
+
+#define BANDIT_DEVNUM	11
+#define BANDIT_MAGIC	0x50
+#define BANDIT_COHERENT	0x40
+
+static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
+{
+	for (; node != 0;node = node->sibling) {
+		int * bus_range;
+		unsigned int *class_code;
+		int len;
+
+		/* For PCI<->PCI bridges or CardBus bridges, we go down */
+		class_code = (unsigned int *) get_property(node, "class-code", NULL);
+		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
+			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
+			continue;
+		bus_range = (int *) get_property(node, "bus-range", &len);
+		if (bus_range != NULL && len > 2 * sizeof(int)) {
+			if (bus_range[1] > higher)
+				higher = bus_range[1];
+		}
+		higher = fixup_one_level_bus_range(node->child, higher);
+	}
+	return higher;
+}
+
+/* This routine fixes the "bus-range" property of all bridges in the
+ * system since they tend to have their "last" member wrong on Macs
+ *
+ * Note that the bus numbers manipulated here are OF bus numbers, they
+ * are not Linux bus numbers.
+ */
+static void __init fixup_bus_range(struct device_node *bridge)
+{
+	int * bus_range;
+	int len;
+
+	/* Lookup the "bus-range" property for the hose */
+	bus_range = (int *) get_property(bridge, "bus-range", &len);
+	if (bus_range == NULL || len < 2 * sizeof(int)) {
+		printk(KERN_WARNING "Can't get bus-range for %s\n",
+			       bridge->full_name);
+		return;
+	}
+	bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
+}
+
+/*
+ * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
+ *
+ * The "Bandit" version is present in all early PCI PowerMacs,
+ * and up to the first ones using Grackle. Some machines may
+ * have 2 bandit controllers (2 PCI busses).
+ *
+ * "Chaos" is used in some "Bandit"-type machines as a bridge
+ * for the separate display bus. It is accessed the same
+ * way as bandit, but cannot be probed for devices. It therefore
+ * has its own config access functions.
+ *
+ * The "UniNorth" version is present in all Core99 machines
+ * (iBook, G4, new IMacs, and all the recent Apple machines).
+ * It contains 3 controllers in one ASIC.
+ *
+ * The U3 is the bridge used on G5 machines. It contains an
+ * AGP bus, which is dealt with by the old UniNorth access routines,
+ * and a HyperTransport bus, which uses its own set of access
+ * functions.
+ */
+
+#define MACRISC_CFA0(dev_fn, off)	\
+	((1 << (unsigned long)PCI_SLOT(dev_fn)) \
+	| (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
+	| (((unsigned long)(off)) & 0xFCUL))
+
+#define MACRISC_CFA1(bus, devfn, off)	\
+	((((unsigned long)(bus)) << 16) \
+	|(((unsigned long)(devfn)) << 8) \
+	|(((unsigned long)(off)) & 0xFCUL) \
+	|1UL)
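+
+/* Worked example (illustrative): a type 0 cycle on the root bus for
+ * slot 13, function 0, offset 0x10 encodes as
+ *
+ *	MACRISC_CFA0 = (1 << 13) | (0 << 8) | 0x10 = 0x2010
+ *
+ * (one-hot slot select in the high bits), while a device behind a
+ * bridge on bus 1, devfn 0x08, offset 0x10 encodes as
+ *
+ *	MACRISC_CFA1 = (1 << 16) | (0x08 << 8) | 0x10 | 1 = 0x10811
+ *
+ * with bit 0 set to flag the type 1 cycle.
+ */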
+
+static unsigned long macrisc_cfg_access(struct pci_controller* hose,
+					       u8 bus, u8 dev_fn, u8 offset)
+{
+	unsigned int caddr;
+
+	if (bus == hose->first_busno) {
+		if (dev_fn < (11 << 3))
+			return 0;
+		caddr = MACRISC_CFA0(dev_fn, offset);
+	} else
+		caddr = MACRISC_CFA1(bus, dev_fn, offset);
+
+	/* Uninorth will return garbage if we don't read back the value ! */
+	do {
+		out_le32(hose->cfg_addr, caddr);
+	} while (in_le32(hose->cfg_addr) != caddr);
+
+	offset &= has_uninorth ? 0x07 : 0x03;
+	return ((unsigned long)hose->cfg_data) + offset;
+}
+
+static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
+				      int offset, int len, u32 *val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr;
+
+	addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	switch (len) {
+	case 1:
+		*val = in_8((u8 *)addr);
+		break;
+	case 2:
+		*val = in_le16((u16 *)addr);
+		break;
+	default:
+		*val = in_le32((u32 *)addr);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
+				       int offset, int len, u32 val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr;
+
+	addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	switch (len) {
+	case 1:
+		out_8((u8 *)addr, val);
+		(void) in_8((u8 *)addr);
+		break;
+	case 2:
+		out_le16((u16 *)addr, val);
+		(void) in_le16((u16 *)addr);
+		break;
+	default:
+		out_le32((u32 *)addr, val);
+		(void) in_le32((u32 *)addr);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops macrisc_pci_ops =
+{
+	macrisc_read_config,
+	macrisc_write_config
+};
+
+/*
+ * Verify that a specific (bus, dev_fn) exists on chaos
+ */
+static int
+chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
+{
+	struct device_node *np;
+	u32 *vendor, *device;
+
+	np = pci_busdev_to_OF_node(bus, devfn);
+	if (np == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	vendor = (u32 *)get_property(np, "vendor-id", NULL);
+	device = (u32 *)get_property(np, "device-id", NULL);
+	if (vendor == NULL || device == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
+	    && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+		  int len, u32 *val)
+{
+	int result = chaos_validate_dev(bus, devfn, offset);
+	if (result == PCIBIOS_BAD_REGISTER_NUMBER)
+		*val = ~0U;
+	if (result != PCIBIOS_SUCCESSFUL)
+		return result;
+	return macrisc_read_config(bus, devfn, offset, len, val);
+}
+
+static int
+chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
+		   int len, u32 val)
+{
+	int result = chaos_validate_dev(bus, devfn, offset);
+	if (result != PCIBIOS_SUCCESSFUL)
+		return result;
+	return macrisc_write_config(bus, devfn, offset, len, val);
+}
+
+static struct pci_ops chaos_pci_ops =
+{
+	chaos_read_config,
+	chaos_write_config
+};
+
+#ifdef CONFIG_POWER4
+
+/*
+ * These versions of U3 HyperTransport config space access ops do not
+ * implement self-view of the HT host yet
+ */
+
+/*
+ * This function deals with some "special cases" devices.
+ *
+ *  0 -> No special case
+ *  1 -> Skip the device but act as if the access was successfull
+ *       (return 0xff's on reads, eventually, cache config space
+ *       accesses in a later version)
+ * -1 -> Hide the device (unsuccessful acess)
+ */
+static int u3_ht_skip_device(struct pci_controller *hose,
+			     struct pci_bus *bus, unsigned int devfn)
+{
+	struct device_node *busdn, *dn;
+	int i;
+
+	/* We only allow config cycles to devices that are in OF device-tree
+	 * as we are apparently having some weird things going on with some
+	 * revs of K2 on recent G5s
+	 */
+	if (bus->self)
+		busdn = pci_device_to_OF_node(bus->self);
+	else
+		busdn = hose->arch_data;
+	for (dn = busdn->child; dn; dn = dn->sibling)
+		if (dn->data && PCI_DN(dn)->devfn == devfn)
+			break;
+	if (dn == NULL)
+		return -1;
+
+	/*
+	 * When a device in K2 is powered down, we die on config
+	 * cycle accesses. Fix that here.
+	 */
+	for (i=0; i<2; i++)
+		if (k2_skiplist[i] == dn)
+			return 1;
+
+	return 0;
+}
+
+#define U3_HT_CFA0(devfn, off)		\
+		((((unsigned long)devfn) << 8) | offset)
+#define U3_HT_CFA1(bus, devfn, off)	\
+		(U3_HT_CFA0(devfn, off) \
+		+ (((unsigned long)bus) << 16) \
+		+ 0x01000000UL)
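+
+/* Worked example (illustrative): HT config addresses are plain linear
+ * offsets into the mapped window. For the root bus, devfn 0x18,
+ * offset 0x04:
+ *
+ *	U3_HT_CFA0 = (0x18 << 8) | 0x04 = 0x1804
+ *
+ * and for bus 2, same devfn/offset:
+ *
+ *	U3_HT_CFA1 = 0x1804 + (2 << 16) + 0x01000000 = 0x1021804
+ */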
+
+static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
+					     u8 bus, u8 devfn, u8 offset)
+{
+	if (bus == hose->first_busno) {
+		/* For now, we don't self-probe the U3 HT bridge */
+		if (PCI_SLOT(devfn) == 0)
+			return 0;
+		return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
+	} else
+		return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
+}
+
+static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+				    int offset, int len, u32 *val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr;
+
+	struct device_node *np = pci_busdev_to_OF_node(bus, devfn);
+	if (np == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	switch (u3_ht_skip_device(hose, bus, devfn)) {
+	case 0:
+		break;
+	case 1:
+		switch (len) {
+		case 1:
+			*val = 0xff; break;
+		case 2:
+			*val = 0xffff; break;
+		default:
+			*val = 0xfffffffful; break;
+		}
+		return PCIBIOS_SUCCESSFUL;
+	default:
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	switch (len) {
+	case 1:
+		*val = in_8((u8 *)addr);
+		break;
+	case 2:
+		*val = in_le16((u16 *)addr);
+		break;
+	default:
+		*val = in_le32((u32 *)addr);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+				     int offset, int len, u32 val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr;
+
+	struct device_node *np = pci_busdev_to_OF_node(bus, devfn);
+	if (np == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	switch (u3_ht_skip_device(hose, bus, devfn)) {
+	case 0:
+		break;
+	case 1:
+		return PCIBIOS_SUCCESSFUL;
+	default:
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/*
+	 * Note: the caller has already checked that offset is
+	 * suitably aligned and that len is 1, 2 or 4.
+	 */
+	switch (len) {
+	case 1:
+		out_8((u8 *)addr, val);
+		(void) in_8((u8 *)addr);
+		break;
+	case 2:
+		out_le16((u16 *)addr, val);
+		(void) in_le16((u16 *)addr);
+		break;
+	default:
+		out_le32((u32 *)addr, val);
+		(void) in_le32((u32 *)addr);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops u3_ht_pci_ops =
+{
+	u3_ht_read_config,
+	u3_ht_write_config
+};
+
+#endif /* CONFIG_POWER4 */
+
+/*
+ * For a bandit bridge, turn on cache coherency if necessary.
+ * N.B. we could clean this up using the hose ops directly.
+ */
+static void __init
+init_bandit(struct pci_controller *bp)
+{
+	unsigned int vendev, magic;
+	int rev;
+
+	/* read the word at offset 0 in config space for device 11 */
+	out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
+	udelay(2);
+	vendev = in_le32(bp->cfg_data);
+	if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
+			PCI_VENDOR_ID_APPLE) {
+		/* read the revision id */
+		out_le32(bp->cfg_addr,
+			 (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
+		udelay(2);
+		rev = in_8(bp->cfg_data);
+		if (rev != BANDIT_REVID)
+			printk(KERN_WARNING
+			       "Unknown revision %d for bandit\n", rev);
+	} else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
+		printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
+		return;
+	}
+
+	/* read the word at offset 0x50 */
+	out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
+	udelay(2);
+	magic = in_le32(bp->cfg_data);
+	if ((magic & BANDIT_COHERENT) != 0)
+		return;
+	magic |= BANDIT_COHERENT;
+	udelay(2);
+	out_le32(bp->cfg_data, magic);
+	printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
+}
+
+
+/*
+ * Tweak the PCI-PCI bridge chip on the blue & white G3s.
+ */
+static void __init
+init_p2pbridge(void)
+{
+	struct device_node *p2pbridge;
+	struct pci_controller* hose;
+	u8 bus, devfn;
+	u16 val;
+
+	/* XXX it would be better here to identify the specific
+	   PCI-PCI bridge chip we have. */
+	if ((p2pbridge = find_devices("pci-bridge")) == 0
+	    || p2pbridge->parent == NULL
+	    || strcmp(p2pbridge->parent->name, "pci") != 0)
+		return;
+	if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
+		DBG("Can't find PCI info for PCI<->PCI bridge\n");
+		return;
+	}
+	/* Warning: At this point, we have not yet renumbered all busses.
+	 * So we must use OF walking to find our hose.
+	 */
+	hose = pci_find_hose_for_OF_device(p2pbridge);
+	if (!hose) {
+		DBG("Can't find hose for PCI<->PCI bridge\n");
+		return;
+	}
+	if (early_read_config_word(hose, bus, devfn,
+				   PCI_BRIDGE_CONTROL, &val) < 0) {
+		printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
+		return;
+	}
+	val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
+	early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
+}
+
+/*
+ * Some Apple desktop machines have a NEC PD720100A USB2 controller
+ * on the motherboard. Open Firmware, on these, will disable the
+ * EHCI part of it so it behaves like a pair of OHCIs. This fixup
+ * code re-enables it ;)
+ */
+static void __init
+fixup_nec_usb2(void)
+{
+	struct device_node *nec;
+
+	for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
+		struct pci_controller *hose;
+		u32 data, *prop;
+		u8 bus, devfn;
+		
+		prop = (u32 *)get_property(nec, "vendor-id", NULL);
+		if (prop == NULL)
+			continue;
+		if (0x1033 != *prop)
+			continue;
+		prop = (u32 *)get_property(nec, "device-id", NULL);
+		if (prop == NULL)
+			continue;
+		if (0x0035 != *prop)
+			continue;
+		prop = (u32 *)get_property(nec, "reg", NULL);
+		if (prop == NULL)
+			continue;
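+		/* "reg" phys.hi uses the standard OF PCI encoding:
+		 * bits 16-23 hold the bus number, bits 8-15 the devfn
+		 */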
+		devfn = (prop[0] >> 8) & 0xff;
+		bus = (prop[0] >> 16) & 0xff;
+		if (PCI_FUNC(devfn) != 0)
+			continue;
+		hose = pci_find_hose_for_OF_device(nec);
+		if (!hose)
+			continue;
+		early_read_config_dword(hose, bus, devfn, 0xe4, &data);
+		if (data & 1UL) {
+			printk(KERN_INFO "Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
+			data &= ~1UL;
+			early_write_config_dword(hose, bus, devfn, 0xe4, data);
+			early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
+				nec->intrs[0].line);
+		}
+	}
+}
+
+void __init
+pmac_find_bridges(void)
+{
+	struct device_node *np, *root;
+	struct device_node *ht = NULL;
+
+	root = of_find_node_by_path("/");
+	if (root == NULL) {
+		printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
+		return;
+	}
+	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
+		if (np->name == NULL)
+			continue;
+		if (strcmp(np->name, "bandit") == 0
+		    || strcmp(np->name, "chaos") == 0
+		    || strcmp(np->name, "pci") == 0) {
+			if (add_bridge(np) == 0)
+				of_node_get(np);
+		}
+		if (strcmp(np->name, "ht") == 0) {
+			of_node_get(np);
+			ht = np;
+		}
+	}
+	of_node_put(root);
+
+	/* Probe HT last as it relies on the AGP resources having
+	 * already been set up
+	 */
+	if (ht && add_bridge(ht) != 0)
+		of_node_put(ht);
+
+	init_p2pbridge();
+	fixup_nec_usb2();
+	
+	/* We still have some issues with the Xserve G4; keeping an
+	 * offset between bus numbers and domains when we assign all
+	 * busses should help for now
+	 */
+	if (pci_assign_all_busses)
+		pcibios_assign_bus_offset = 0x10;
+
+#ifdef CONFIG_POWER4 
+	/* There is something wrong with DMA on U3/HT. I haven't figured out
+	 * the details yet, but if I set the cache line size to 128 bytes like
+	 * it should be, I get memory corruption caused by devices like
+	 * sungem (even without the MWI bit set, but maybe sungem doesn't
+	 * care). Right now, it appears that setting up a 64-byte line size
+	 * works properly, 64 bytes being the max transfer size of HT; I
+	 * suppose this is related to the way HT/PCI are hooked together. I
+	 * still need to dive into more specs though to be really sure of
+	 * what's going on. --BenH.
+	 *
+	 * Ok, apparently, it's just that HT can't do more than 64-byte
+	 * transactions. MWI seems to be meaningless there as well; it may
+	 * be worth nop'ing out pci_set_mwi too, though I haven't done that
+	 * yet.
+	 *
+	 * Note that it's a bit different for whatever is in the AGP slot.
+	 * For now, I don't care, but this can become a real issue; we
+	 * should probably hook pci_set_mwi anyway to make sure it sets
+	 * the real cache line size in there.
+	 */
+	if (machine_is_compatible("MacRISC4"))
+		pci_cache_line_size = 16; /* 16 32-bit units == 64 bytes */
+
+	pmac_check_ht_link();
+#endif /* CONFIG_POWER4 */
+}
+
+#define GRACKLE_CFA(b, d, o)	(0x80 | ((b) << 8) | ((d) << 16) \
+				 | (((o) & ~3) << 24))
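+/*
+ * GRACKLE_CFA builds the CONFIG_ADDR word pre-byte-swapped so that
+ * out_be32() lands it correctly in the little-endian register:
+ * e.g. GRACKLE_CFA(0, 0, 0xa8) == 0xa8000080, the swapped form of
+ * the usual 0x800000a8 (enable bit | bus 0 | dev 0 | reg 0xa8).
+ */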
+
+#define GRACKLE_PICR1_STG		0x00000040
+#define GRACKLE_PICR1_LOOPSNOOP		0x00000010
+
+/* N.B. this is called before the bridges are initialized, so we can't
+   use grackle_pcibios_{read,write}_config_dword. */
+static inline void grackle_set_stg(struct pci_controller* bp, int enable)
+{
+	unsigned int val;
+
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	val = in_le32(bp->cfg_data);
+	val = enable? (val | GRACKLE_PICR1_STG) :
+		(val & ~GRACKLE_PICR1_STG);
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	out_le32(bp->cfg_data, val);
+	(void)in_le32(bp->cfg_data);
+}
+
+static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
+{
+	unsigned int val;
+
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	val = in_le32(bp->cfg_data);
+	val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
+		(val & ~GRACKLE_PICR1_LOOPSNOOP);
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	out_le32(bp->cfg_data, val);
+	(void)in_le32(bp->cfg_data);
+}
+
+static int __init
+setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
+{
+	pci_assign_all_busses = 1;
+	has_uninorth = 1;
+	hose->ops = &macrisc_pci_ops;
+	hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+	hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+	/* We "know" that the bridge at f2000000 has the PCI slots. */
+	return addr->address == 0xf2000000;
+}
+
+static void __init
+setup_bandit(struct pci_controller* hose, struct reg_property* addr)
+{
+	hose->ops = &macrisc_pci_ops;
+	hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+	hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+	init_bandit(hose);
+}
+
+static void __init
+setup_chaos(struct pci_controller* hose, struct reg_property* addr)
+{
+	/* assume a `chaos' bridge */
+	hose->ops = &chaos_pci_ops;
+	hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
+	hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+}
+
+#ifdef CONFIG_POWER4
+
+static void __init setup_u3_agp(struct pci_controller* hose)
+{
+	/* On G5, we move AGP up to a high bus number so we don't need
+	 * to reassign bus numbers for HT. If we ever have P2P bridges
+	 * on AGP, we'll have to move pci_assign_all_busses to the
+	 * pci_controller structure so we enable it for AGP and not for
+	 * HT children.
+	 * We hard code the address because of the different size of
+	 * the reg address cell; we shall fix that by killing struct
+	 * reg_property and using some accessor functions instead
+	 */
+	hose->first_busno = 0xf0;
+	hose->last_busno = 0xff;
+	has_uninorth = 1;
+	hose->ops = &macrisc_pci_ops;
+	hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+	hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+
+	u3_agp = hose;
+}
+
+static void __init setup_u3_ht(struct pci_controller* hose)
+{
+	struct device_node *np = (struct device_node *)hose->arch_data;
+	int i, cur;
+
+	hose->ops = &u3_ht_pci_ops;
+
+	/* We hard code the address because of the different size of
+	 * the reg address cell, we shall fix that by killing struct
+	 * reg_property and using some accessor functions instead
+	 */
+	hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
+
+	/*
+	 * /ht node doesn't expose a "ranges" property, so we "remove" regions
+	 * that have been allocated to AGP. So far, this version of the code
+	 * doesn't assign any of the 0xfxxxxxxx "fine" memory regions to /ht.
+	 * We need to fix that sooner or later by either parsing all child
+	 * "ranges" properties or figuring out the U3 address space decoding
+	 * logic and then reading its configuration register (if any).
+	 */
+	hose->io_base_phys = 0xf4000000;
+	hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
+	isa_io_base = (unsigned long) hose->io_base_virt;
+	hose->io_resource.name = np->full_name;
+	hose->io_resource.start = 0;
+	hose->io_resource.end = 0x003fffff;
+	hose->io_resource.flags = IORESOURCE_IO;
+	hose->pci_mem_offset = 0;
+	hose->first_busno = 0;
+	hose->last_busno = 0xef;
+	hose->mem_resources[0].name = np->full_name;
+	hose->mem_resources[0].start = 0x80000000;
+	hose->mem_resources[0].end = 0xefffffff;
+	hose->mem_resources[0].flags = IORESOURCE_MEM;
+
+	if (u3_agp == NULL) {
+		DBG("U3 has no AGP, using full resource range\n");
+		return;
+	}
+
+	/* We "remove" the AGP resources from the resources allocated to HT,
+	 * that is we create "holes". However, that code makes assumptions
+	 * that so far happen to hold (fingers crossed...), typically that
+	 * resources in the AGP node are properly ordered
+	 */
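+	/*
+	 * Illustrative example: if the AGP node claims
+	 * 0xb0000000..0xbfffffff out of our 0x80000000..0xefffffff
+	 * window, the loop below leaves the HT host with two
+	 * resources, 0x80000000..0xafffffff and 0xc0000000..0xefffffff.
+	 */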
+	cur = 0;
+	for (i=0; i<3; i++) {
+		struct resource *res = &u3_agp->mem_resources[i];
+		if (res->flags != IORESOURCE_MEM)
+			continue;
+		/* We don't care about "fine" resources */
+		if (res->start >= 0xf0000000)
+			continue;
+		/* Check if it's just a matter of "shrinking" us in one direction */
+		if (hose->mem_resources[cur].start == res->start) {
+			DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
+			    cur, hose->mem_resources[cur].start, res->end + 1);
+			hose->mem_resources[cur].start = res->end + 1;
+			continue;
+		}
+		if (hose->mem_resources[cur].end == res->end) {
+			DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
+			    cur, hose->mem_resources[cur].end, res->start - 1);
+			hose->mem_resources[cur].end = res->start - 1;
+			continue;
+		}
+		/* No, it's not the case, we need a hole */
+		if (cur == 2) {
+			/* not enough resources to make a hole, we drop part of the range */
+			printk(KERN_WARNING "Running out of resources for /ht host !\n");
+			hose->mem_resources[cur].end = res->start - 1;
+			continue;
+		}		
+		cur++;
+		DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
+		    cur-1, res->start - 1, cur, res->end + 1);
+		hose->mem_resources[cur].name = np->full_name;
+		hose->mem_resources[cur].flags = IORESOURCE_MEM;
+		hose->mem_resources[cur].start = res->end + 1;
+		hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
+		hose->mem_resources[cur-1].end = res->start - 1;
+	}
+}
+
+#endif /* CONFIG_POWER4 */
+
+void __init
+setup_grackle(struct pci_controller *hose)
+{
+	setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+	if (machine_is_compatible("AAPL,PowerBook1998"))
+		grackle_set_loop_snoop(hose, 1);
+#if 0	/* Disabled for now, HW problems ??? */
+	grackle_set_stg(hose, 1);
+#endif
+}
+
+static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
+			   struct device_node *dev, int primary)
+{
+	static unsigned int static_lc_ranges[2024];
+	unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
+	unsigned int size;
+	int rlen = 0, orig_rlen;
+	int memno = 0;
+	struct resource *res;
+	int np, na = prom_n_addr_cells(dev);
+
+	np = na + 5;
+
+	/* First we try to merge ranges to fix a problem with some pmacs
+	 * that can have more than 3 ranges, fortunately using contiguous
+	 * addresses -- BenH
+	 */
+	dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
+	if (!dt_ranges)
+		return;
+	/*	lc_ranges = alloc_bootmem(rlen);*/
+	lc_ranges = static_lc_ranges;
+	if (!lc_ranges)
+		return; /* what can we do here ? */
+	memcpy(lc_ranges, dt_ranges, rlen);
+	orig_rlen = rlen;
+
+	/* Let's work on a copy of the "ranges" property instead of damaging
+	 * the device-tree image in memory
+	 */
+	ranges = lc_ranges;
+	prev = NULL;
+	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
+		if (prev) {
+			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
+				(prev[2] + prev[na+4]) == ranges[2] &&
+				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
+				prev[na+4] += ranges[na+4];
+				ranges[0] = 0;
+				ranges += np;
+				continue;
+			}
+		}
+		prev = ranges;
+		ranges += np;
+	}
+
+	/*
+	 * The ranges property is laid out as an array of elements,
+	 * each of which comprises:
+	 *   cells 0 - 2:	a PCI address
+	 *   cell 3 or cells 3 - 4:	a CPU physical address
+	 *			(one or two cells, per prom_n_addr_cells(dev))
+	 *   cells 4 - 5 or 5 - 6:	the size of the range
+	 */
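+	/*
+	 * Illustrative example with one CPU address cell (np == 6):
+	 *   0x01000000 0x0 0x0  0xf2000000  0x0 0x00800000
+	 * i.e. an 8MB PCI I/O window mapped at CPU address 0xf2000000.
+	 */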
+	ranges = lc_ranges;
+	rlen = orig_rlen;
+	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
+		res = NULL;
+		size = ranges[na+4];
+		switch (ranges[0] >> 24) {
+		case 1:		/* I/O space */
+			if (ranges[2] != 0)
+				break;
+			hose->io_base_phys = ranges[na+2];
+			/* limit I/O space to 16MB */
+			if (size > 0x01000000)
+				size = 0x01000000;
+			hose->io_base_virt = ioremap(ranges[na+2], size);
+			if (primary)
+				isa_io_base = (unsigned long) hose->io_base_virt;
+			res = &hose->io_resource;
+			res->flags = IORESOURCE_IO;
+			res->start = ranges[2];
+			break;
+		case 2:		/* memory space */
+			memno = 0;
+			if (ranges[1] == 0 && ranges[2] == 0
+			    && ranges[na+4] <= (16 << 20)) {
+				/* 1st 16MB, i.e. ISA memory area */
+#if 0
+				if (primary)
+					isa_mem_base = ranges[na+2];
+#endif
+				memno = 1;
+			}
+			while (memno < 3 && hose->mem_resources[memno].flags)
+				++memno;
+			if (memno == 0)
+				hose->pci_mem_offset = ranges[na+2] - ranges[2];
+			if (memno < 3) {
+				res = &hose->mem_resources[memno];
+				res->flags = IORESOURCE_MEM;
+				res->start = ranges[na+2];
+			}
+			break;
+		}
+		if (res != NULL) {
+			res->name = dev->full_name;
+			res->end = res->start + size - 1;
+			res->parent = NULL;
+			res->sibling = NULL;
+			res->child = NULL;
+		}
+		ranges += np;
+	}
+}
+
+/*
+ * We assume that if we have a G3 powermac, we have one bridge called
+ * "pci" (an MPC106) and no bandit or chaos bridges, and contrariwise,
+ * if we have one or more bandit or chaos bridges, we don't have an MPC106.
+ */
+static int __init add_bridge(struct device_node *dev)
+{
+	int len;
+	struct pci_controller *hose;
+	struct reg_property *addr;
+	char* disp_name;
+	int *bus_range;
+	int primary = 1;
+
+	DBG("Adding PCI host bridge %s\n", dev->full_name);
+
+	addr = (struct reg_property *) get_property(dev, "reg", &len);
+	if (addr == NULL || len < sizeof(*addr)) {
+		printk(KERN_WARNING "Can't use %s: no address\n",
+		       dev->full_name);
+		return -ENODEV;
+	}
+	bus_range = (int *) get_property(dev, "bus-range", &len);
+	if (bus_range == NULL || len < 2 * sizeof(int)) {
+		printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
+		       dev->full_name);
+	}
+
+	hose = pcibios_alloc_controller();
+	if (!hose)
+		return -ENOMEM;
+	hose->arch_data = dev;
+	hose->first_busno = bus_range ? bus_range[0] : 0;
+	hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+	disp_name = NULL;
+#ifdef CONFIG_POWER4
+	if (device_is_compatible(dev, "u3-agp")) {
+		setup_u3_agp(hose);
+		disp_name = "U3-AGP";
+		primary = 0;
+	} else if (device_is_compatible(dev, "u3-ht")) {
+		setup_u3_ht(hose);
+		disp_name = "U3-HT";
+		primary = 1;
+	} else
+#endif /* CONFIG_POWER4 */
+	if (device_is_compatible(dev, "uni-north")) {
+		primary = setup_uninorth(hose, addr);
+		disp_name = "UniNorth";
+	} else if (strcmp(dev->name, "pci") == 0) {
+		/* XXX assume this is an mpc106 (grackle) */
+		setup_grackle(hose);
+		disp_name = "Grackle (MPC106)";
+	} else if (strcmp(dev->name, "bandit") == 0) {
+		setup_bandit(hose, addr);
+		disp_name = "Bandit";
+	} else if (strcmp(dev->name, "chaos") == 0) {
+		setup_chaos(hose, addr);
+		disp_name = "Chaos";
+		primary = 0;
+	}
+	printk(KERN_INFO "Found %s PCI host bridge at 0x%08x. Firmware bus number: %d->%d\n",
+	       disp_name, addr->address, hose->first_busno, hose->last_busno);
+	DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+	    hose, hose->cfg_addr, hose->cfg_data);
+
+       	/* Interpret the "ranges" property */
+       	/* This also maps the I/O region and sets isa_io/mem_base */
+       	pci_process_bridge_OF_ranges(hose, dev, primary);
+
+       	/* Fixup "bus-range" OF property */
+       	fixup_bus_range(dev);
+
+	return 0;
+}
+
+static void __init
+pcibios_fixup_OF_interrupts(void)
+{
+	struct pci_dev* dev = NULL;
+
+	/*
+	 * Open Firmware often doesn't initialize the
+	 * PCI_INTERRUPT_LINE config register properly, so we
+	 * should find the device node and apply the interrupt
+	 * obtained from the OF device-tree
+	 */
+	for_each_pci_dev(dev) {
+		struct device_node *node;
+		node = pci_device_to_OF_node(dev);
+		/* this is the node, see if it has interrupts */
+		if (node && node->n_intrs > 0)
+			dev->irq = node->intrs[0].line;
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+}
+
+void __init
+pmac_pcibios_fixup(void)
+{
+	/* Fixup interrupts according to OF tree */
+	pcibios_fixup_OF_interrupts();
+}
+
+int
+pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
+{
+	struct device_node* node;
+	int updatecfg = 0;
+	int uninorth_child;
+
+	node = pci_device_to_OF_node(dev);
+
+	/* We don't want to enable USB controllers absent from the OF tree
+	 * (iBook second controller)
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_APPLE
+	    && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
+	    && !node) {
+		printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
+		       pci_name(dev));
+		return -EINVAL;
+	}
+
+	if (!node)
+		return 0;
+
+	uninorth_child = node->parent &&
+		device_is_compatible(node->parent, "uni-north");
+	
+	/* Firewire & GMAC were disabled after the PCI probe; now that
+	 * the driver is claiming them, we must re-enable them.
+	 */
+	if (uninorth_child && !strcmp(node->name, "firewire") &&
+	    (device_is_compatible(node, "pci106b,18") ||
+	     device_is_compatible(node, "pci106b,30") ||
+	     device_is_compatible(node, "pci11c1,5811"))) {
+		pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
+		pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
+		updatecfg = 1;
+	}
+	if (uninorth_child && !strcmp(node->name, "ethernet") &&
+	    device_is_compatible(node, "gmac")) {
+		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
+		updatecfg = 1;
+	}
+
+	if (updatecfg) {
+		u16 cmd;
+	
+		/*
+		 * Make sure PCI is correctly configured
+		 *
+		 * We use the old pci_bios versions of the functions since,
+		 * by default, gmac is not powered up, and so will be absent
+		 * from the kernel's initial PCI lookup.
+		 *
+		 * This should be replaced by the new PCI mechanisms so the
+		 * device really gets registered.
+		 */
+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
+		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
+	}
+
+	return 0;
+}
+
+/* We power down some devices after they have been probed. They'll
+ * be powered back on later, when a driver claims them
+ */
+void __init
+pmac_pcibios_after_init(void)
+{
+	struct device_node* nd;
+
+#ifdef CONFIG_BLK_DEV_IDE
+	struct pci_dev *dev = NULL;
+
+	/* OF fails to initialize IDE controllers on macs
+	 * (and maybe other machines)
+	 *
+	 * Ideally, this should be moved to the IDE layer, but we need
+	 * to check specifically with Andre Hedrick how to do it cleanly
+	 * since the common IDE code seems to care about the fact that the
+	 * BIOS may have disabled a controller.
+	 *
+	 * -- BenH
+	 */
+	for_each_pci_dev(dev) {
+		if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
+			pci_enable_device(dev);
+	}
+#endif /* CONFIG_BLK_DEV_IDE */
+
+	nd = find_devices("firewire");
+	while (nd) {
+		if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
+				   device_is_compatible(nd, "pci106b,30") ||
+				   device_is_compatible(nd, "pci11c1,5811"))
+		    && device_is_compatible(nd->parent, "uni-north")) {
+			pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
+			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
+		}
+		nd = nd->next;
+	}
+	nd = find_devices("ethernet");
+	while (nd) {
+		if (nd->parent && device_is_compatible(nd, "gmac")
+		    && device_is_compatible(nd->parent, "uni-north"))
+			pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
+		nd = nd->next;
+	}
+}
+
+#ifdef CONFIG_PPC64
+static void __init pmac_fixup_phb_resources(void)
+{
+	struct pci_controller *hose, *tmp;
+	
+	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+		unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
+		hose->io_resource.start += offset;
+		hose->io_resource.end += offset;
+		printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
+		       hose->global_number,
+		       hose->io_resource.start, hose->io_resource.end);
+	}
+}
+
+void __init pmac_pci_init(void)
+{
+	struct device_node *np, *root;
+	struct device_node *ht = NULL;
+
+	/* Probe root PCI hosts, that is, on U3, the AGP host and the
+	 * HyperTransport host. The latter is kept around and added
+	 * last, as its resource management relies on the AGP resources
+	 * having been set up first
+	 */
+	root = of_find_node_by_path("/");
+	if (root == NULL) {
+		printk(KERN_CRIT "pmac_pci_init: can't find root of device tree\n");
+		return;
+	}
+	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
+		if (np->name == NULL)
+			continue;
+		if (strcmp(np->name, "pci") == 0) {
+			if (add_bridge(np) == 0)
+				of_node_get(np);
+		}
+		if (strcmp(np->name, "ht") == 0) {
+			of_node_get(np);
+			ht = np;
+		}
+	}
+	of_node_put(root);
+
+	/* Now setup the HyperTransport host if we found any
+	 */
+	if (ht && add_bridge(ht) != 0)
+		of_node_put(ht);
+
+	/* Fixup the IO resources on our host bridges as the common code
+	 * does it only for children of the host bridges
+	 */
+	pmac_fixup_phb_resources();
+
+	/* Setup the linkage between OF nodes and PHBs */ 
+	pci_devs_phb_init();
+
+	/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+	 * assume there is no P2P bridge on the AGP bus, which should be a
+	 * safe assumption, hopefully.
+	 */
+	if (u3_agp) {
+		struct device_node *np = u3_agp->arch_data;
+		PCI_DN(np)->busno = 0xf0;
+		for (np = np->child; np; np = np->sibling)
+			PCI_DN(np)->busno = 0xf0;
+	}
+
+	pmac_check_ht_link();
+
+	/* Tell pci.c not to use the common resource allocation mechanism */
+	pci_probe_only = 1;
+	
+	/* Allow all IO */
+	io_page_mask = -1;
+}
+#endif
+
+#ifdef CONFIG_PPC32
+void pmac_pci_fixup_cardbus(struct pci_dev* dev)
+{
+	if (_machine != _MACH_Pmac)
+		return;
+	/*
+	 * Fix the interrupt routing on the various cardbus bridges
+	 * used on powerbooks
+	 */
+	if (dev->vendor != PCI_VENDOR_ID_TI)
+		return;
+	if (dev->device == PCI_DEVICE_ID_TI_1130 ||
+	    dev->device == PCI_DEVICE_ID_TI_1131) {
+		u8 val;
+	    	/* Enable PCI interrupt */
+		if (pci_read_config_byte(dev, 0x91, &val) == 0)
+			pci_write_config_byte(dev, 0x91, val | 0x30);
+		/* Disable ISA interrupt mode */
+		if (pci_read_config_byte(dev, 0x92, &val) == 0)
+			pci_write_config_byte(dev, 0x92, val & ~0x06);
+	}
+	if (dev->device == PCI_DEVICE_ID_TI_1210 ||
+	    dev->device == PCI_DEVICE_ID_TI_1211 ||
+	    dev->device == PCI_DEVICE_ID_TI_1410 ||
+	    dev->device == PCI_DEVICE_ID_TI_1510) {
+		u8 val;
+		/* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
+		   signal out the MFUNC0 pin */
+		if (pci_read_config_byte(dev, 0x8c, &val) == 0)
+			pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
+		/* Disable ISA interrupt mode */
+		if (pci_read_config_byte(dev, 0x92, &val) == 0)
+			pci_write_config_byte(dev, 0x92, val & ~0x06);
+	}
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
+
+void pmac_pci_fixup_pciata(struct pci_dev* dev)
+{
+	u8 progif = 0;
+
+	/*
+	 * On PowerMacs, we try to switch any PCI ATA controller to
+	 * fully native mode
+	 */
+	if (_machine != _MACH_Pmac)
+		return;
+	/* Some controllers don't have the class IDE */
+	if (dev->vendor == PCI_VENDOR_ID_PROMISE)
+		switch(dev->device) {
+		case PCI_DEVICE_ID_PROMISE_20246:
+		case PCI_DEVICE_ID_PROMISE_20262:
+		case PCI_DEVICE_ID_PROMISE_20263:
+		case PCI_DEVICE_ID_PROMISE_20265:
+		case PCI_DEVICE_ID_PROMISE_20267:
+		case PCI_DEVICE_ID_PROMISE_20268:
+		case PCI_DEVICE_ID_PROMISE_20269:
+		case PCI_DEVICE_ID_PROMISE_20270:
+		case PCI_DEVICE_ID_PROMISE_20271:
+		case PCI_DEVICE_ID_PROMISE_20275:
+		case PCI_DEVICE_ID_PROMISE_20276:
+		case PCI_DEVICE_ID_PROMISE_20277:
+			goto good;
+		}
+	/* Others, check PCI class */
+	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+		return;
+ good:
+	pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+	if ((progif & 5) != 5) {
+		printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
+		(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
+		if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+		    (progif & 5) != 5)
+			printk(KERN_ERR "Rewrite of PROGIF failed !\n");
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
+#endif
+
+/*
+ * Disable the second function on K2-SATA (it's broken) and
+ * disable the IO BARs on the first one
+ */
+static void fixup_k2_sata(struct pci_dev* dev)
+{
+	int i;
+	u16 cmd;
+
+	if (PCI_FUNC(dev->devfn) > 0) {
+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+		for (i = 0; i < 6; i++) {
+			dev->resource[i].start = dev->resource[i].end = 0;
+			dev->resource[i].flags = 0;
+			pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+		}
+	} else {
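+		/* On function 0, only BARs 0-4 are cleared below; BAR 5 is
+		 * presumably the MMIO register window the driver still
+		 * needs.
+		 */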
+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		cmd &= ~PCI_COMMAND_IO;
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+		for (i = 0; i < 5; i++) {
+			dev->resource[i].start = dev->resource[i].end = 0;
+			dev->resource[i].flags = 0;
+			pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
+
diff --git a/arch/powerpc/platforms/powermac/pmac_pic.c b/arch/powerpc/platforms/powermac/pmac_pic.c
new file mode 100644
index 0000000..a6b1b57
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_pic.c
@@ -0,0 +1,672 @@
+/*
+ *  Support for the interrupt controllers found on Power Macintosh,
+ *  currently Apple's "Grand Central" interrupt controller in all
+ *  its incarnations. OpenPIC support used on newer machines is
+ *  in a separate file.
+ *
+ *  Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/time.h>
+#include <asm/open_pic.h>
+#include <asm/xmon.h>
+#include <asm/pmac_feature.h>
+#include <asm/mpic.h>
+
+#include "pmac_pic.h"
+
+/*
+ * XXX this should be in xmon.h, but putting it there means xmon.h
+ * has to include <linux/interrupt.h> (to get irqreturn_t), which
+ * causes all sorts of problems.  -- paulus
+ */
+extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
+
+struct pmac_irq_hw {
+        unsigned int    event;
+        unsigned int    enable;
+        unsigned int    ack;
+        unsigned int    level;
+};
+
+/* Default addresses */
+static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
+        (struct pmac_irq_hw *) 0xf3000020,
+        (struct pmac_irq_hw *) 0xf3000010,
+        (struct pmac_irq_hw *) 0xf4000020,
+        (struct pmac_irq_hw *) 0xf4000010,
+};
+
+#define GC_LEVEL_MASK		0x3ff00000
+#define OHARE_LEVEL_MASK	0x1ff00000
+#define HEATHROW_LEVEL_MASK	0x1ff00000
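+/*
+ * Interrupts are handled in banks of 32: for an irq number n,
+ * bank i == n >> 5 and bit == 1 << (n & 0x1f). The level_mask[]
+ * entries are per-bank bitmaps of level-triggered sources;
+ * e.g. GC_LEVEL_MASK marks bits 20-29 of bank 0 as level.
+ */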
+
+static int max_irqs;
+static int max_real_irqs;
+static u32 level_mask[4];
+
+static DEFINE_SPINLOCK(pmac_pic_lock);
+
+
+#define GATWICK_IRQ_POOL_SIZE        10
+static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
+
+/*
+ * Mark an irq as "lost".  This is only used on the pmac
+ * since it can lose interrupts (see pmac_set_irq_mask).
+ * -- Cort
+ */
+void
+__set_lost(unsigned long irq_nr, int nokick)
+{
+	if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
+		atomic_inc(&ppc_n_lost_interrupts);
+		if (!nokick)
+			set_dec(1);
+	}
+}
+
+static void
+pmac_mask_and_ack_irq(unsigned int irq_nr)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+        unsigned long flags;
+
+        if ((unsigned)irq_nr >= max_irqs)
+                return;
+
+        clear_bit(irq_nr, ppc_cached_irq_mask);
+        if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
+                atomic_dec(&ppc_n_lost_interrupts);
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+        out_le32(&pmac_irq_hw[i]->ack, bit);
+        do {
+                /* make sure ack gets to controller before we enable
+                   interrupts */
+                mb();
+        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+                != (ppc_cached_irq_mask[i] & bit));
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+        unsigned long flags;
+
+        if ((unsigned)irq_nr >= max_irqs)
+                return;
+
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+        /* enable unmasked interrupts */
+        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
+
+        do {
+                /* make sure mask gets to controller before we
+                   return to user */
+                mb();
+        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
+                != (ppc_cached_irq_mask[i] & bit));
+
+        /*
+         * Unfortunately, setting the bit in the enable register
+         * when the device interrupt is already on *doesn't* set
+         * the bit in the flag register or request another interrupt.
+         */
+        if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
+		__set_lost((ulong)irq_nr, nokicklost);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+/* When an irq gets requested for the first client, if it's an
+ * edge interrupt, we clear any previous one on the controller
+ */
+static unsigned int pmac_startup_irq(unsigned int irq_nr)
+{
+        unsigned long bit = 1UL << (irq_nr & 0x1f);
+        int i = irq_nr >> 5;
+
+	if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+		out_le32(&pmac_irq_hw[i]->ack, bit);
+        set_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+
+	return 0;
+}
+
+static void pmac_mask_irq(unsigned int irq_nr)
+{
+        clear_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+        mb();
+}
+
+static void pmac_unmask_irq(unsigned int irq_nr)
+{
+        set_bit(irq_nr, ppc_cached_irq_mask);
+        pmac_set_irq_mask(irq_nr, 0);
+}
+
+static void pmac_end_irq(unsigned int irq_nr)
+{
+	if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
+	    && irq_desc[irq_nr].action) {
+        	set_bit(irq_nr, ppc_cached_irq_mask);
+	        pmac_set_irq_mask(irq_nr, 1);
+	}
+}
+
+
+struct hw_interrupt_type pmac_pic = {
+	.typename	= " PMAC-PIC ",
+	.startup	= pmac_startup_irq,
+	.enable		= pmac_unmask_irq,
+	.disable	= pmac_mask_irq,
+	.ack		= pmac_mask_and_ack_irq,
+	.end		= pmac_end_irq,
+};
+
+struct hw_interrupt_type gatwick_pic = {
+	.typename	= " GATWICK  ",
+	.startup	= pmac_startup_irq,
+	.enable		= pmac_unmask_irq,
+	.disable	= pmac_mask_irq,
+	.ack		= pmac_mask_and_ack_irq,
+	.end		= pmac_end_irq,
+};
+
+static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+	int irq, bits;
+
+	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
+		int i = irq >> 5;
+		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+		/* We must read level interrupts from the level register */
+		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+		bits &= ppc_cached_irq_mask[i];
+		if (bits == 0)
+			continue;
+		irq += __ilog2(bits);
+		__do_IRQ(irq, regs);
+		return IRQ_HANDLED;
+	}
+	printk("gatwick irq not from gatwick pic\n");
+	return IRQ_NONE;
+}
+
+int
+pmac_get_irq(struct pt_regs *regs)
+{
+	int irq;
+	unsigned long bits = 0;
+
+#ifdef CONFIG_SMP
+	void psurge_smp_message_recv(struct pt_regs *);
+
+	/* IPIs are a hack on the powersurge -- Cort */
+	if (smp_processor_id() != 0) {
+		psurge_smp_message_recv(regs);
+		return -2;	/* ignore, already handled */
+	}
+#endif /* CONFIG_SMP */
+	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
+		int i = irq >> 5;
+		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
+		/* We must read level interrupts from the level register */
+		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+		bits &= ppc_cached_irq_mask[i];
+		if (bits == 0)
+			continue;
+		irq += __ilog2(bits);
+		break;
+	}
+
+	return irq;
+}
+
+/* This routine will fix some missing interrupt values in the device tree
+ * on the gatwick mac-io controller used by some PowerBooks
+ */
+static void __init
+pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
+{
+	struct device_node *node;
+	int count;
+
+	memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
+	node = gw->child;
+	count = 0;
+	while(node)
+	{
+		/* Fix SCC */
+		if (strcasecmp(node->name, "escc") == 0)
+			if (node->child) {
+				if (node->child->n_intrs < 3) {
+					node->child->intrs = &gatwick_int_pool[count];
+					count += 3;
+				}
+				node->child->n_intrs = 3;
+				node->child->intrs[0].line = 15+irq_base;
+				node->child->intrs[1].line =  4+irq_base;
+				node->child->intrs[2].line =  5+irq_base;
+				printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
+					node->child->intrs[0].line,
+					node->child->intrs[1].line,
+					node->child->intrs[2].line);
+			}
+		/* Fix media-bay & left SWIM */
+		if (strcasecmp(node->name, "media-bay") == 0) {
+			struct device_node* ya_node;
+
+			if (node->n_intrs == 0)
+				node->intrs = &gatwick_int_pool[count++];
+			node->n_intrs = 1;
+			node->intrs[0].line = 29+irq_base;
+			printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
+					node->intrs[0].line);
+
+			ya_node = node->child;
+			while(ya_node)
+			{
+				if (strcasecmp(ya_node->name, "floppy") == 0) {
+					if (ya_node->n_intrs < 2) {
+						ya_node->intrs = &gatwick_int_pool[count];
+						count += 2;
+					}
+					ya_node->n_intrs = 2;
+					ya_node->intrs[0].line = 19+irq_base;
+					ya_node->intrs[1].line =  1+irq_base;
+					printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
+						ya_node->intrs[0].line, ya_node->intrs[1].line);
+				}
+				if (strcasecmp(ya_node->name, "ata4") == 0) {
+					if (ya_node->n_intrs < 2) {
+						ya_node->intrs = &gatwick_int_pool[count];
+						count += 2;
+					}
+					ya_node->n_intrs = 2;
+					ya_node->intrs[0].line = 14+irq_base;
+					ya_node->intrs[1].line =  3+irq_base;
+					printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
+						ya_node->intrs[0].line, ya_node->intrs[1].line);
+				}
+				ya_node = ya_node->sibling;
+			}
+		}
+		node = node->sibling;
+	}
+	if (count > GATWICK_IRQ_POOL_SIZE) {
+		printk("WARNING !! Gatwick interrupt pool overflow\n");
+		printk("  GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
+		printk("              requested = %d\n", count);
+	}
+}
+
+/*
+ * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
+ * card which includes an ohare chip that acts as a second interrupt
+ * controller.  If we find this second ohare, set it up and fix the
+ * interrupt value in the device tree for the ethernet chip.
+ */
+static int __init enable_second_ohare(void)
+{
+	unsigned char bus, devfn;
+	unsigned short cmd;
+	unsigned long addr;
+	struct device_node *irqctrler = find_devices("pci106b,7");
+	struct device_node *ether;
+
+	if (irqctrler == NULL || irqctrler->n_addrs <= 0)
+		return -1;
+	addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
+	pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
+	max_irqs = 64;
+	if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
+		struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
+		if (!hose)
+			printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+		else {
+			early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+			cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+			cmd &= ~PCI_COMMAND_IO;
+			early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+		}
+	}
+
+	/* Fix interrupt for the modem/ethernet combo controller. The number
+	   in the device tree (27) is bogus (correct for the ethernet-only
+	   board but not the combo ethernet/modem board).
+	   The real interrupt is 28 on the second controller -> 28+32 = 60.
+	*/
+	ether = find_devices("pci1011,14");
+	if (ether && ether->n_intrs > 0) {
+		ether->intrs[0].line = 60;
+		printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
+		       ether->intrs[0].line);
+	}
+
+	/* Return the interrupt number of the cascade */
+	return irqctrler->intrs[0].line;
+}
+
+static int pmac_u3_cascade(struct pt_regs *regs, void *data)
+{
+	return mpic_get_one_irq((struct mpic *)data, regs);
+}
+
+#ifdef CONFIG_XMON
+static struct irqaction xmon_action = {
+	.handler	= xmon_irq,
+	.flags		= 0,
+	.mask		= CPU_MASK_NONE,
+	.name		= "NMI - XMON"
+};
+#endif
+
+static struct irqaction gatwick_cascade_action = {
+	.handler	= gatwick_action,
+	.flags		= SA_INTERRUPT,
+	.mask		= CPU_MASK_NONE,
+	.name		= "cascade",
+};
+
+void __init pmac_pic_init(void)
+{
+        int i;
+        struct device_node *irqctrler  = NULL;
+        struct device_node *irqctrler2 = NULL;
+	struct device_node *np;
+        unsigned long addr;
+	int irq_cascade = -1;
+	struct mpic *mpic1, *mpic2;
+
+	/* We first try to detect Apple's new Core99 chipset, since mac-io
+	 * is quite different on those machines and contains an IBM MPIC2.
+	 */
+	np = find_type_devices("open-pic");
+	while (np) {
+		if (np->parent && !strcmp(np->parent->name, "u3"))
+			irqctrler2 = np;
+		else
+			irqctrler = np;
+		np = np->next;
+	}
+	if (irqctrler != NULL && irqctrler->n_addrs > 0) {
+		unsigned char senses[128];
+
+		printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
+		       (unsigned int)irqctrler->addrs[0].address);
+		ppc_md.get_irq = mpic_get_irq;
+		pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
+
+		prom_get_irq_senses(senses, 0, 128);
+		mpic1 = mpic_alloc(irqctrler->addrs[0].address,
+				   MPIC_PRIMARY | MPIC_WANTS_RESET,
+				   0, 0, 128, 256, senses, 128, " K2-MPIC  ");
+		BUG_ON(mpic1 == NULL);
+		mpic_init(mpic1);		
+
+		if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
+		    irqctrler2->n_addrs > 0) {
+			printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
+			       (u32)irqctrler2->addrs[0].address,
+			       irqctrler2->intrs[0].line);
+
+			pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
+			prom_get_irq_senses(senses, 128, 128 + 128);
+
+			/* We don't need to set MPIC_BROKEN_U3 here since we don't have
+			 * hypertransport interrupts routed to it
+			 */
+			mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
+					   MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
+					   0, 128, 128, 0, senses, 128, " U3-MPIC  ");
+			BUG_ON(mpic2 == NULL);
+			mpic_init(mpic2);
+			mpic_setup_cascade(irqctrler2->intrs[0].line,
+					   pmac_u3_cascade, mpic2);
+		}
+#ifdef CONFIG_XMON
+		{
+			struct device_node* pswitch;
+			int nmi_irq;
+
+			pswitch = find_devices("programmer-switch");
+			if (pswitch && pswitch->n_intrs) {
+				nmi_irq = pswitch->intrs[0].line;
+				openpic_init_nmi_irq(nmi_irq);
+				setup_irq(nmi_irq, &xmon_action);
+			}
+		}
+#endif	/* CONFIG_XMON */
+		return;
+	}
+	irqctrler = NULL;
+
+	/* Get the level/edge settings; assume that if it's not
+	 * a Grand Central or an OHare, then it's a Heathrow
+	 * (or Paddington).
+	 */
+	if (find_devices("gc"))
+		level_mask[0] = GC_LEVEL_MASK;
+	else if (find_devices("ohare")) {
+		level_mask[0] = OHARE_LEVEL_MASK;
+		/* We might have a second cascaded ohare */
+		level_mask[1] = OHARE_LEVEL_MASK;
+	} else {
+		level_mask[0] = HEATHROW_LEVEL_MASK;
+		level_mask[1] = 0;
+		/* We might have a second cascaded heathrow */
+		level_mask[2] = HEATHROW_LEVEL_MASK;
+		level_mask[3] = 0;
+	}
+
+	/*
+	 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
+	 * 1998 G3 Series PowerBooks have 128,
+	 * other powermacs have 32.
+	 * The combo ethernet/modem card for the Powerstar powerbooks
+	 * (2400/3400/3500, ohare based) has a second ohare chip
+	 * effectively making a total of 64.
+	 */
+	max_irqs = max_real_irqs = 32;
+	irqctrler = find_devices("mac-io");
+	if (irqctrler)
+	{
+		max_real_irqs = 64;
+		if (irqctrler->next)
+			max_irqs = 128;
+		else
+			max_irqs = 64;
+	}
+	for ( i = 0; i < max_real_irqs ; i++ )
+		irq_desc[i].handler = &pmac_pic;
+
+	/* get addresses of first controller */
+	if (irqctrler) {
+		if  (irqctrler->n_addrs > 0) {
+			addr = (unsigned long)
+				ioremap(irqctrler->addrs[0].address, 0x40);
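+			/* Register blocks sit 0x10 apart, highest bank
+			 * first: hw[0] at base + 0x20, hw[1] at base + 0x10,
+			 * matching the default table at the top of the file.
+			 */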
+			for (i = 0; i < 2; ++i)
+				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
+					(addr + (2 - i) * 0x10);
+		}
+
+		/* get addresses of second controller */
+		irqctrler = irqctrler->next;
+		if (irqctrler && irqctrler->n_addrs > 0) {
+			addr = (unsigned long)
+				ioremap(irqctrler->addrs[0].address, 0x40);
+			for (i = 2; i < 4; ++i)
+				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
+					(addr + (4 - i) * 0x10);
+			irq_cascade = irqctrler->intrs[0].line;
+			if (device_is_compatible(irqctrler, "gatwick"))
+				pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
+		}
+	} else {
+		/* older powermacs have a GC (grand central) or ohare at
+		   f3000000, with interrupt control registers at f3000020. */
+		addr = (unsigned long) ioremap(0xf3000000, 0x40);
+		pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
+	}
+
+	/* PowerBooks 3400 and 3500 can have a second controller in a second
+	   ohare chip, on the combo ethernet/modem card */
+	if (machine_is_compatible("AAPL,3400/2400")
+	     || machine_is_compatible("AAPL,3500"))
+		irq_cascade = enable_second_ohare();
+
+	/* disable all interrupts in all controllers */
+	for (i = 0; i * 32 < max_irqs; ++i)
+		out_le32(&pmac_irq_hw[i]->enable, 0);
+	/* mark level interrupts */
+	for (i = 0; i < max_irqs; i++)
+		if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
+			irq_desc[i].status = IRQ_LEVEL;
+
+	/* get interrupt line of secondary interrupt controller */
+	if (irq_cascade >= 0) {
+		printk(KERN_INFO "irq: secondary controller on irq %d\n",
+			(int)irq_cascade);
+		for ( i = max_real_irqs ; i < max_irqs ; i++ )
+			irq_desc[i].handler = &gatwick_pic;
+		setup_irq(irq_cascade, &gatwick_cascade_action);
+	}
+	printk(KERN_INFO "System has %d possible interrupts\n", max_irqs);
+	if (max_irqs != max_real_irqs)
+		printk(KERN_DEBUG "%d interrupts on main controller\n",
+			max_real_irqs);
+
+#ifdef CONFIG_XMON
+	setup_irq(20, &xmon_action);
+#endif	/* CONFIG_XMON */
+}
+
+#ifdef CONFIG_PM
+/*
+ * These procedures are used in implementing sleep on the powerbooks.
+ * sleep_save_intrs() saves the states of all interrupt enables
+ * and disables all interrupts except for the nominated one.
+ * sleep_restore_intrs() restores the states of all interrupt enables.
+ */
+unsigned long sleep_save_mask[2];
+
+/* This used to be passed by the PMU driver but that link got
+ * broken with the new driver model. We use this tweak for now...
+ */
+static int pmacpic_find_viaint(void)
+{
+	int viaint = -1;
+
+#ifdef CONFIG_ADB_PMU
+	struct device_node *np;
+
+	if (pmu_get_model() != PMU_OHARE_BASED)
+		goto not_found;
+	np = of_find_node_by_name(NULL, "via-pmu");
+	if (np == NULL)
+		goto not_found;
+	viaint = np->intrs[0].line;
+#endif /* CONFIG_ADB_PMU */
+
+not_found:
+	return viaint;
+}
+
+static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
+{
+	int viaint = pmacpic_find_viaint();
+
+	sleep_save_mask[0] = ppc_cached_irq_mask[0];
+	sleep_save_mask[1] = ppc_cached_irq_mask[1];
+	ppc_cached_irq_mask[0] = 0;
+	ppc_cached_irq_mask[1] = 0;
+	if (viaint > 0)
+		set_bit(viaint, ppc_cached_irq_mask);
+	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
+	if (max_real_irqs > 32)
+		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
+	(void)in_le32(&pmac_irq_hw[0]->event);
+	/* make sure mask gets to controller before we return to caller */
+	mb();
+	(void)in_le32(&pmac_irq_hw[0]->enable);
+
+	return 0;
+}
+
+static int pmacpic_resume(struct sys_device *sysdev)
+{
+	int i;
+
+	out_le32(&pmac_irq_hw[0]->enable, 0);
+	if (max_real_irqs > 32)
+		out_le32(&pmac_irq_hw[1]->enable, 0);
+	mb();
+	for (i = 0; i < max_real_irqs; ++i)
+		if (test_bit(i, sleep_save_mask))
+			pmac_unmask_irq(i);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static struct sysdev_class pmacpic_sysclass = {
+	set_kset_name("pmac_pic"),
+};
+
+static struct sys_device device_pmacpic = {
+	.id		= 0,
+	.cls		= &pmacpic_sysclass,
+};
+
+static struct sysdev_driver driver_pmacpic = {
+#ifdef CONFIG_PM
+	.suspend	= &pmacpic_suspend,
+	.resume		= &pmacpic_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init init_pmacpic_sysfs(void)
+{
+	if (max_irqs == 0)
+		return -ENODEV;
+
+	printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
+	sysdev_class_register(&pmacpic_sysclass);
+	sysdev_register(&device_pmacpic);
+	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
+	return 0;
+}
+
+subsys_initcall(init_pmacpic_sysfs);
+
diff --git a/arch/powerpc/platforms/powermac/pmac_pic.h b/arch/powerpc/platforms/powermac/pmac_pic.h
new file mode 100644
index 0000000..664103d
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_pic.h
@@ -0,0 +1,11 @@
+#ifndef __PPC_PLATFORMS_PMAC_PIC_H
+#define __PPC_PLATFORMS_PMAC_PIC_H
+
+#include <linux/irq.h>
+
+extern struct hw_interrupt_type pmac_pic;
+
+void pmac_pic_init(void);
+int pmac_get_irq(struct pt_regs *regs);
+
+#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac_setup.c b/arch/powerpc/platforms/powermac/pmac_setup.c
new file mode 100644
index 0000000..3667e0b
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_setup.c
@@ -0,0 +1,662 @@
+/*
+ *  arch/powerpc/platforms/powermac/pmac_setup.c
+ *
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Adapted for Power Macintosh by Paul Mackerras
+ *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ *  Derived from "arch/alpha/kernel/setup.c"
+ *    Copyright (C) 1995 Linus Torvalds
+ *
+ *  Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+/*
+ * bootup setup stuff..
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/major.h>
+#include <linux/initrd.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+#include <linux/ide.h>
+#include <linux/pci.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/bitops.h>
+#include <linux/suspend.h>
+
+#include <asm/reg.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm/ohare.h>
+#include <asm/mediabay.h>
+#include <asm/machdep.h>
+#include <asm/dma.h>
+#include <asm/bootx.h>
+#include <asm/cputable.h>
+#include <asm/btext.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/of_device.h>
+#include <asm/mmu_context.h>
+
+#include "pmac_pic.h"
+
+#undef SHOW_GATWICK_IRQS
+
+extern long pmac_time_init(void);
+extern unsigned long pmac_get_rtc_time(void);
+extern int pmac_set_rtc_time(unsigned long nowtime);
+extern void pmac_read_rtc_time(void);
+extern void pmac_calibrate_decr(void);
+extern void pmac_pcibios_fixup(void);
+extern void pmac_find_bridges(void);
+extern unsigned long pmac_ide_get_base(int index);
+extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
+	unsigned long data_port, unsigned long ctrl_port, int *irq);
+
+extern void pmac_nvram_update(void);
+extern unsigned char pmac_nvram_read_byte(int addr);
+extern void pmac_nvram_write_byte(int addr, unsigned char val);
+extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
+extern void pmac_pcibios_after_init(void);
+extern int of_show_percpuinfo(struct seq_file *m, int i);
+
+unsigned char drive_info;
+
+int ppc_override_l2cr = 0;
+int ppc_override_l2cr_value;
+int has_l2cache = 0;
+
+static int current_root_goodness = -1;
+
+extern int pmac_newworld;
+
+#define DEFAULT_ROOT_DEVICE Root_SDA1	/* sda1 - slightly silly choice */
+
+extern void zs_kgdb_hook(int tty_num);
+static void ohare_init(void);
+#ifdef CONFIG_BOOTX_TEXT
+static void pmac_progress(char *s, unsigned short hex);
+#endif
+
+sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
+
+#ifdef CONFIG_SMP
+extern struct smp_ops_t psurge_smp_ops;
+extern struct smp_ops_t core99_smp_ops;
+#endif /* CONFIG_SMP */
+
+static int
+pmac_show_cpuinfo(struct seq_file *m)
+{
+	struct device_node *np;
+	char *pp;
+	int plen;
+	int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
+		NULL, PMAC_MB_INFO_MODEL, 0);
+	unsigned int mbflags = (unsigned int)pmac_call_feature(PMAC_FTR_GET_MB_INFO,
+		NULL, PMAC_MB_INFO_FLAGS, 0);
+	char* mbname;
+
+	if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (int)&mbname) != 0)
+		mbname = "Unknown";
+
+	/* find motherboard type */
+	seq_printf(m, "machine\t\t: ");
+	np = find_devices("device-tree");
+	if (np != NULL) {
+		pp = (char *) get_property(np, "model", NULL);
+		if (pp != NULL)
+			seq_printf(m, "%s\n", pp);
+		else
+			seq_printf(m, "PowerMac\n");
+		pp = (char *) get_property(np, "compatible", &plen);
+		if (pp != NULL) {
+			seq_printf(m, "motherboard\t:");
+			while (plen > 0) {
+				int l = strlen(pp) + 1;
+				seq_printf(m, " %s", pp);
+				plen -= l;
+				pp += l;
+			}
+			seq_printf(m, "\n");
+		}
+	} else
+		seq_printf(m, "PowerMac\n");
+
+	/* print parsed model */
+	seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
+	seq_printf(m, "pmac flags\t: %08x\n", mbflags);
+
+	/* find l2 cache info */
+	np = find_devices("l2-cache");
+	if (np == 0)
+		np = find_type_devices("cache");
+	if (np != 0) {
+		unsigned int *ic = (unsigned int *)
+			get_property(np, "i-cache-size", NULL);
+		unsigned int *dc = (unsigned int *)
+			get_property(np, "d-cache-size", NULL);
+		seq_printf(m, "L2 cache\t:");
+		has_l2cache = 1;
+		if (get_property(np, "cache-unified", NULL) != 0 && dc) {
+			seq_printf(m, " %dK unified", *dc / 1024);
+		} else {
+			if (ic)
+				seq_printf(m, " %dK instruction", *ic / 1024);
+			if (dc)
+				seq_printf(m, "%s %dK data",
+					   (ic? " +": ""), *dc / 1024);
+		}
+		pp = (char *) get_property(np, "ram-type", NULL);
+		if (pp)
+			seq_printf(m, " %s", pp);
+		seq_printf(m, "\n");
+	}
+
+	/* find ram info */
+	np = find_devices("memory");
+	if (np != 0) {
+		int n;
+		struct reg_property *reg = (struct reg_property *)
+			get_property(np, "reg", &n);
+
+		if (reg != 0) {
+			unsigned long total = 0;
+
+			for (n /= sizeof(struct reg_property); n > 0; --n)
+				total += (reg++)->size;
+			seq_printf(m, "memory\t\t: %luMB\n", total >> 20);
+		}
+	}
+
+	/* Checks "l2cr-value" property in the device tree */
+	np = find_devices("cpus");
+	if (np == 0)
+		np = find_type_devices("cpu");
+	if (np != 0) {
+		unsigned int *l2cr = (unsigned int *)
+			get_property(np, "l2cr-value", NULL);
+		if (l2cr != 0) {
+			seq_printf(m, "l2cr override\t: 0x%x\n", *l2cr);
+		}
+	}
+
+	/* Indicate newworld/oldworld */
+	seq_printf(m, "pmac-generation\t: %s\n",
+		   pmac_newworld ? "NewWorld" : "OldWorld");
+
+
+	return 0;
+}
+
+static int
+pmac_show_percpuinfo(struct seq_file *m, int i)
+{
+#ifdef CONFIG_CPU_FREQ_PMAC
+	extern unsigned int pmac_get_one_cpufreq(int i);
+	unsigned int freq = pmac_get_one_cpufreq(i);
+	if (freq != 0) {
+		seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
+		return 0;
+	}
+#endif /* CONFIG_CPU_FREQ_PMAC */
+	return of_show_percpuinfo(m, i);
+}
+
+static volatile u32 *sysctrl_regs;
+
+void __init
+pmac_setup_arch(void)
+{
+	struct device_node *cpu;
+	int *fp;
+	unsigned long pvr;
+
+	pvr = PVR_VER(mfspr(SPRN_PVR));
+
+	/* Set loops_per_jiffy to a half-way reasonable value,
+	   for use until calibrate_delay gets called. */
+	cpu = find_type_devices("cpu");
+	if (cpu != 0) {
+		fp = (int *) get_property(cpu, "clock-frequency", NULL);
+		if (fp != 0) {
+			if (pvr == 4 || pvr >= 8)
+				/* 604, G3, G4 etc. */
+				loops_per_jiffy = *fp / HZ;
+			else
+				/* 601, 603, etc. */
+				loops_per_jiffy = *fp / (2*HZ);
+		} else
+			loops_per_jiffy = 50000000 / HZ;
+	}
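+	/* e.g. a 350MHz G3 (pvr 8) with HZ == 100 starts out with
+	 * loops_per_jiffy == 3500000 until calibrate_delay() runs
+	 */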
+
+	/* this area has the CPU identification register
+	   and some registers used by smp boards */
+	sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
+	ohare_init();
+
+	/* Lookup PCI hosts */
+	pmac_find_bridges();
+
+	/* Checks "l2cr-value" property in the device tree */
+	if (cpu_has_feature(CPU_FTR_L2CR)) {
+		struct device_node *np = find_devices("cpus");
+		if (np == 0)
+			np = find_type_devices("cpu");
+		if (np != 0) {
+			unsigned int *l2cr = (unsigned int *)
+				get_property(np, "l2cr-value", NULL);
+			if (l2cr != 0) {
+				ppc_override_l2cr = 1;
+				ppc_override_l2cr_value = *l2cr;
+				_set_L2CR(0);
+				_set_L2CR(ppc_override_l2cr_value);
+			}
+		}
+	}
+
+	if (ppc_override_l2cr)
+		printk(KERN_INFO "L2CR overridden (0x%x), backside cache is %s\n",
+			ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000)
+				? "enabled" : "disabled");
+
+#ifdef CONFIG_KGDB
+	zs_kgdb_hook(0);
+#endif
+
+#ifdef CONFIG_ADB_CUDA
+	find_via_cuda();
+#else
+	if (find_devices("via-cuda")) {
+		printk("WARNING ! Your machine is Cuda based but your kernel\n");
+		printk("          wasn't compiled with the CONFIG_ADB_CUDA option !\n");
+	}
+#endif
+#ifdef CONFIG_ADB_PMU
+	find_via_pmu();
+#else
+	if (find_devices("via-pmu")) {
+		printk("WARNING ! Your machine is PMU based but your kernel\n");
+		printk("          wasn't compiled with the CONFIG_ADB_PMU option !\n");
+	}
+#endif
+#ifdef CONFIG_NVRAM
+	pmac_nvram_init();
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+		ROOT_DEV = DEFAULT_ROOT_DEVICE;
+
+#ifdef CONFIG_SMP
+	/* Check for Core99 */
+	if (find_devices("uni-n") || find_devices("u3"))
+		ppc_md.smp_ops = &core99_smp_ops;
+	else
+		ppc_md.smp_ops = &psurge_smp_ops;
+#endif /* CONFIG_SMP */
+
+	pci_create_OF_bus_map();
+}
+
+static void __init ohare_init(void)
+{
+	/*
+	 * Turn on the L2 cache.
+	 * We assume that we have a PSX memory controller iff
+	 * we have an ohare I/O controller.
+	 */
+	if (find_devices("ohare") != NULL) {
+		if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
+			if (sysctrl_regs[4] & 0x10)
+				sysctrl_regs[4] |= 0x04000020;
+			else
+				sysctrl_regs[4] |= 0x04000000;
+			if(has_l2cache)
+				printk(KERN_INFO "Level 2 cache enabled\n");
+		}
+	}
+}
+
+extern char *bootpath;
+extern char *bootdevice;
+void *boot_host;
+int boot_target;
+int boot_part;
+extern dev_t boot_dev;
+
+#ifdef CONFIG_SCSI
+void __init
+note_scsi_host(struct device_node *node, void *host)
+{
+	int l;
+	char *p;
+
+	l = strlen(node->full_name);
+	if (bootpath != NULL && bootdevice != NULL
+	    && strncmp(node->full_name, bootdevice, l) == 0
+	    && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
+		boot_host = host;
+		/*
+		 * There's a bug in OF 1.0.5.  (Why am I not surprised.)
+		 * If you pass a path like scsi/sd@1:0 to canon, it returns
+		 * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
+		 * That is, the scsi target number doesn't get preserved.
+		 * So we pick the target number out of bootpath and use that.
+		 */
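+		/* e.g. a bootpath ending in "/sd@1:3" yields boot_target 1
+		 * and boot_part 3. */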
+		p = strstr(bootpath, "/sd@");
+		if (p != NULL) {
+			p += 4;
+			boot_target = simple_strtoul(p, NULL, 10);
+			p = strchr(p, ':');
+			if (p != NULL)
+				boot_part = simple_strtoul(p + 1, NULL, 10);
+		}
+	}
+}
+#endif
+
+#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
+static dev_t __init
+find_ide_boot(void)
+{
+	char *p;
+	int n;
+	dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
+
+	if (bootdevice == NULL)
+		return 0;
+	p = strrchr(bootdevice, '/');
+	if (p == NULL)
+		return 0;
+	n = p - bootdevice;
+
+	return pmac_find_ide_boot(bootdevice, n);
+}
+#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
+
+static void __init
+find_boot_device(void)
+{
+#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
+	boot_dev = find_ide_boot();
+#endif
+}
+
+static int initializing = 1;
+/* TODO: Merge the suspend-to-RAM code with the common code!
+ * Currently this is a stub implementation for suspend-to-disk
+ * only.
+ */
+
+#ifdef CONFIG_SOFTWARE_SUSPEND
+
+static int pmac_pm_prepare(suspend_state_t state)
+{
+	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+	return 0;
+}
+
+static int pmac_pm_enter(suspend_state_t state)
+{
+	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+	/* Give up the lazy FPU & vec state so we don't have to back
+	 * it up from the low-level code
+	 */
+	enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
+		enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+	return 0;
+}
+
+static int pmac_pm_finish(suspend_state_t state)
+{
+	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
+
+	/* Restore userland MMU context */
+	set_context(current->active_mm->context, current->active_mm->pgd);
+
+	return 0;
+}
+
+static struct pm_ops pmac_pm_ops = {
+	.pm_disk_mode	= PM_DISK_SHUTDOWN,
+	.prepare	= pmac_pm_prepare,
+	.enter		= pmac_pm_enter,
+	.finish		= pmac_pm_finish,
+};
+
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
+static int pmac_late_init(void)
+{
+	initializing = 0;
+#ifdef CONFIG_SOFTWARE_SUSPEND
+	pm_set_ops(&pmac_pm_ops);
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+	return 0;
+}
+
+late_initcall(pmac_late_init);
+
+/* can't be __init - can be called whenever a disk is first accessed */
+void
+note_bootable_part(dev_t dev, int part, int goodness)
+{
+	static int found_boot = 0;
+	char *p;
+
+	if (!initializing)
+		return;
+	if ((goodness <= current_root_goodness) &&
+	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
+		return;
+	p = strstr(saved_command_line, "root=");
+	if (p != NULL && (p == saved_command_line || p[-1] == ' '))
+		return;
+
+	if (!found_boot) {
+		find_boot_device();
+		found_boot = 1;
+	}
+	if (!boot_dev || dev == boot_dev) {
+		ROOT_DEV = dev + part;
+		boot_dev = 0;
+		current_root_goodness = goodness;
+	}
+}
+
+static void
+pmac_restart(char *cmd)
+{
+#ifdef CONFIG_ADB_CUDA
+	struct adb_request req;
+#endif /* CONFIG_ADB_CUDA */
+
+	switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
+	case SYS_CTRLER_CUDA:
+		cuda_request(&req, NULL, 2, CUDA_PACKET,
+			     CUDA_RESET_SYSTEM);
+		for (;;)
+			cuda_poll();
+		break;
+#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_ADB_PMU
+	case SYS_CTRLER_PMU:
+		pmu_restart();
+		break;
+#endif /* CONFIG_ADB_PMU */
+	default: ;
+	}
+}
+
+static void
+pmac_power_off(void)
+{
+#ifdef CONFIG_ADB_CUDA
+	struct adb_request req;
+#endif /* CONFIG_ADB_CUDA */
+
+	switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
+	case SYS_CTRLER_CUDA:
+		cuda_request(&req, NULL, 2, CUDA_PACKET,
+			     CUDA_POWERDOWN);
+		for (;;)
+			cuda_poll();
+		break;
+#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_ADB_PMU
+	case SYS_CTRLER_PMU:
+		pmu_shutdown();
+		break;
+#endif /* CONFIG_ADB_PMU */
+	default: ;
+	}
+}
+
+static void
+pmac_halt(void)
+{
+	pmac_power_off();
+}
+
+void __init
+pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
+	  unsigned long r6, unsigned long r7)
+{
+	/* isa_io_base gets set in pmac_find_bridges */
+	isa_mem_base = PMAC_ISA_MEM_BASE;
+	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
+	ISA_DMA_THRESHOLD = ~0L;
+	DMA_MODE_READ = 1;
+	DMA_MODE_WRITE = 2;
+
+	ppc_md.setup_arch     = pmac_setup_arch;
+	ppc_md.show_cpuinfo   = pmac_show_cpuinfo;
+	ppc_md.show_percpuinfo = pmac_show_percpuinfo;
+	ppc_md.irq_canonicalize = NULL;
+	ppc_md.init_IRQ       = pmac_pic_init;
+	ppc_md.get_irq        = pmac_get_irq; /* Changed later on ... */
+
+	ppc_md.pcibios_fixup  = pmac_pcibios_fixup;
+	ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook;
+	ppc_md.pcibios_after_init = pmac_pcibios_after_init;
+	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
+
+	ppc_md.restart        = pmac_restart;
+	ppc_md.power_off      = pmac_power_off;
+	ppc_md.halt           = pmac_halt;
+
+	ppc_md.time_init      = pmac_time_init;
+	ppc_md.set_rtc_time   = pmac_set_rtc_time;
+	ppc_md.get_rtc_time   = pmac_get_rtc_time;
+	ppc_md.calibrate_decr = pmac_calibrate_decr;
+
+	ppc_md.feature_call   = pmac_do_feature_call;
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
+        ppc_ide_md.ide_init_hwif	= pmac_ide_init_hwif_ports;
+        ppc_ide_md.default_io_base	= pmac_ide_get_base;
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
+#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
+
+#ifdef CONFIG_BOOTX_TEXT
+	ppc_md.progress = pmac_progress;
+#endif /* CONFIG_BOOTX_TEXT */
+
+	if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
+
+}
+
+#ifdef CONFIG_BOOTX_TEXT
+static void __init
+pmac_progress(char *s, unsigned short hex)
+{
+	if (boot_text_mapped) {
+		btext_drawstring(s);
+		btext_drawchar('\n');
+	}
+}
+#endif /* CONFIG_BOOTX_TEXT */
+
+static int __init
+pmac_declare_of_platform_devices(void)
+{
+	struct device_node *np;
+
+	np = find_devices("uni-n");
+	if (np) {
+		for (np = np->child; np != NULL; np = np->sibling)
+			if (strncmp(np->name, "i2c", 3) == 0) {
+				of_platform_device_create(np, "uni-n-i2c",
+							  NULL);
+				break;
+			}
+	}
+	np = find_devices("u3");
+	if (np) {
+		for (np = np->child; np != NULL; np = np->sibling)
+			if (strncmp(np->name, "i2c", 3) == 0) {
+				of_platform_device_create(np, "u3-i2c",
+							  NULL);
+				break;
+			}
+	}
+
+	np = find_devices("valkyrie");
+	if (np)
+		of_platform_device_create(np, "valkyrie", NULL);
+	np = find_devices("platinum");
+	if (np)
+		of_platform_device_create(np, "platinum", NULL);
+
+	return 0;
+}
+
+device_initcall(pmac_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/powermac/pmac_sleep.S b/arch/powerpc/platforms/powermac/pmac_sleep.S
new file mode 100644
index 0000000..88419c7
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_sleep.S
@@ -0,0 +1,396 @@
+/*
+ * This file contains sleep low-level functions for PowerBook G3.
+ *    Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *    and Paul Mackerras (paulus@samba.org).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#define MAGIC	0x4c617273	/* 'Lars' */
+
+/*
+ * Structure for storing CPU registers on the stack.
+ */
+#define SL_SP		0
+#define SL_PC		4
+#define SL_MSR		8
+#define SL_SDR1		0xc
+#define SL_SPRG0	0x10	/* 4 sprg's */
+#define SL_DBAT0	0x20
+#define SL_IBAT0	0x28
+#define SL_DBAT1	0x30
+#define SL_IBAT1	0x38
+#define SL_DBAT2	0x40
+#define SL_IBAT2	0x48
+#define SL_DBAT3	0x50
+#define SL_IBAT3	0x58
+#define SL_TB		0x60
+#define SL_R2		0x68
+#define SL_CR		0x6c
+#define SL_R12		0x70	/* r12 to r31 */
+#define SL_SIZE		(SL_R12 + 80)
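+/* r12..r31 is 20 registers of 4 bytes each, hence the 80 above; the
+ * whole save frame is SL_SIZE = 0x70 + 80 = 0xc0 bytes.
+ */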
+
+	.section .text
+	.align	5
+
+#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
+
+/* This gets called by via-pmu.c late during the sleep process.
+ * The PMU has already been sent the sleep command and will shut us
+ * down soon. We need to save everything that is needed and set up
+ * the wakeup vector that will be called by the ROM on wakeup.
+ */
+_GLOBAL(low_sleep_handler)
+#ifndef CONFIG_6xx
+	blr
+#else
+	mflr	r0
+	stw	r0,4(r1)
+	stwu	r1,-SL_SIZE(r1)
+	mfcr	r0
+	stw	r0,SL_CR(r1)
+	stw	r2,SL_R2(r1)
+	stmw	r12,SL_R12(r1)
+
+	/* Save MSR & SDR1 */
+	mfmsr	r4
+	stw	r4,SL_MSR(r1)
+	mfsdr1	r4
+	stw	r4,SL_SDR1(r1)
+
+	/* Get a stable timebase and save it */
+1:	mftbu	r4
+	stw	r4,SL_TB(r1)
+	mftb	r5
+	stw	r5,SL_TB+4(r1)
+	mftbu	r3
+	cmpw	r3,r4
+	bne	1b
+
+	/* Save SPRGs */
+	mfsprg	r4,0
+	stw	r4,SL_SPRG0(r1)
+	mfsprg	r4,1
+	stw	r4,SL_SPRG0+4(r1)
+	mfsprg	r4,2
+	stw	r4,SL_SPRG0+8(r1)
+	mfsprg	r4,3
+	stw	r4,SL_SPRG0+12(r1)
+
+	/* Save BATs */
+	mfdbatu	r4,0
+	stw	r4,SL_DBAT0(r1)
+	mfdbatl	r4,0
+	stw	r4,SL_DBAT0+4(r1)
+	mfdbatu	r4,1
+	stw	r4,SL_DBAT1(r1)
+	mfdbatl	r4,1
+	stw	r4,SL_DBAT1+4(r1)
+	mfdbatu	r4,2
+	stw	r4,SL_DBAT2(r1)
+	mfdbatl	r4,2
+	stw	r4,SL_DBAT2+4(r1)
+	mfdbatu	r4,3
+	stw	r4,SL_DBAT3(r1)
+	mfdbatl	r4,3
+	stw	r4,SL_DBAT3+4(r1)
+	mfibatu	r4,0
+	stw	r4,SL_IBAT0(r1)
+	mfibatl	r4,0
+	stw	r4,SL_IBAT0+4(r1)
+	mfibatu	r4,1
+	stw	r4,SL_IBAT1(r1)
+	mfibatl	r4,1
+	stw	r4,SL_IBAT1+4(r1)
+	mfibatu	r4,2
+	stw	r4,SL_IBAT2(r1)
+	mfibatl	r4,2
+	stw	r4,SL_IBAT2+4(r1)
+	mfibatu	r4,3
+	stw	r4,SL_IBAT3(r1)
+	mfibatl	r4,3
+	stw	r4,SL_IBAT3+4(r1)
+
+	/* Backup various CPU config stuffs */
+	bl	__save_cpu_setup
+
+	/* The ROM can wake us up via 2 different vectors:
+	 *  - On wallstreet & lombard, we must write a magic
+	 *    value 'Lars' at address 4 and a pointer to a
+	 *    memory location containing the PC to resume from
+	 *    at address 0.
+	 *  - On Core99, we must store the wakeup vector at
+	 *    address 0x80 and possibly its parameters
+	 *    at address 0x84. I've had some trouble with those
+	 *    parameters, however, and I no longer use them.
+	 */
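+	/* In memory-map terms: the word at 0 holds a pointer to the saved
+	 * PC and the word at 4 holds MAGIC ('Lars'); the word at 0x80
+	 * holds the Core99 wakeup vector and the word at 0x84 a pointer
+	 * to the saved PC again, as set up below.
+	 */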
+	lis	r5,grackle_wake_up@ha
+	addi	r5,r5,grackle_wake_up@l
+	tophys(r5,r5)
+	stw	r5,SL_PC(r1)
+	lis	r4,KERNELBASE@h
+	tophys(r5,r1)
+	addi	r5,r5,SL_PC
+	lis	r6,MAGIC@ha
+	addi	r6,r6,MAGIC@l
+	stw	r5,0(r4)
+	stw	r6,4(r4)
+	/* Setup stuffs at 0x80-0x84 for Core99 */
+	lis	r3,core99_wake_up@ha
+	addi	r3,r3,core99_wake_up@l
+	tophys(r3,r3)
+	stw	r3,0x80(r4)
+	stw	r5,0x84(r4)
+	/* Store a pointer to our backup storage into
+	 * a kernel global
+	 */
+	lis r3,sleep_storage@ha
+	addi r3,r3,sleep_storage@l
+	stw r5,0(r3)
+
+	.globl	low_cpu_die
+low_cpu_die:
+	/* Flush & disable all caches */
+	bl	flush_disable_caches
+
+	/* Turn off data relocation. */
+	mfmsr	r3		/* get current MSR */
+	rlwinm	r3,r3,0,28,26	/* Turn off DR bit */
+	sync
+	mtmsr	r3
+	isync
+
+BEGIN_FTR_SECTION
+	/* Flush any pending L2 data prefetches to work around HW bug */
+	sync
+	lis	r3,0xfff0
+	lwz	r0,0(r3)	/* perform cache-inhibited load to ROM */
+	sync			/* (caches are disabled at this point) */
+END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
+
+/*
+ * Set the HID0 and MSR for sleep.
+ */
+	mfspr	r2,SPRN_HID0
+	rlwinm	r2,r2,0,10,7	/* clear doze, nap */
+	oris	r2,r2,HID0_SLEEP@h
+	sync
+	isync
+	mtspr	SPRN_HID0,r2
+	sync
+
+/* This loop puts us back to sleep in case we have a spurious
+ * wakeup, so that the host bridge properly stays asleep. The
+ * CPU will be turned off, either after a known time (about 1
+ * second) on wallstreet & lombard, or as soon as the CPU enters
+ * SLEEP mode on core99.
+ */
+	mfmsr	r2
+	oris	r2,r2,MSR_POW@h
+1:	sync
+	mtmsr	r2
+	isync
+	b	1b
+
+/*
+ * Here is the resume code.
+ */
+
+
+/*
+ * Core99 machines resume here
+ * r4 has the physical address of SL_PC(sp) (unused)
+ */
+_GLOBAL(core99_wake_up)
+	/* Make sure HID0 no longer contains any sleep bit and that data cache
+	 * is disabled
+	 */
+	mfspr	r3,SPRN_HID0
+	rlwinm	r3,r3,0,11,7		/* clear SLEEP, NAP, DOZE bits */
+	rlwinm	r3,r3,0,18,15		/* clear DCE, ICE */
+	mtspr	SPRN_HID0,r3
+	sync
+	isync
+
+	/* sanitize MSR */
+	mfmsr	r3
+	ori	r3,r3,MSR_EE|MSR_IP
+	xori	r3,r3,MSR_EE|MSR_IP
+	sync
+	isync
+	mtmsr	r3
+	sync
+	isync
+
+	/* Recover sleep storage */
+	lis	r3,sleep_storage@ha
+	addi	r3,r3,sleep_storage@l
+	tophys(r3,r3)
+	lwz	r1,0(r3)
+
+	/* Fall through to the older resume code ... */
+/*
+ * Here is the resume code for older machines.
+ * r1 has the physical address of SL_PC(sp).
+ */
+
+grackle_wake_up:
+
+	/* Restore the kernel's segment registers before
+	 * we do any r1 memory access as we are not sure they
+	 * are in a sane state above the first 256Mb region
+	 */
+	li	r0,16		/* load up segment register values */
+	mtctr	r0		/* for context 0 */
+	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
+	li	r4,0
+3:	mtsrin	r3,r4
+	addi	r3,r3,0x111	/* increment VSID */
+	addis	r4,r4,0x1000	/* address of next segment */
+	bdnz	3b
+	sync
+	isync
+
+	subi	r1,r1,SL_PC
+
+	/* Restore various CPU config stuffs */
+	bl	__restore_cpu_setup
+
+	/* Make sure all FPRs have been initialized */
+	bl	reloc_offset
+	bl	__init_fpu_registers
+
+	/* Invalidate & enable L1 cache, we don't care about
+	 * whatever the ROM may have tried to write to memory
+	 */
+	bl	__inval_enable_L1
+
+	/* Restore the BATs, and SDR1.  Then we can turn on the MMU. */
+	lwz	r4,SL_SDR1(r1)
+	mtsdr1	r4
+	lwz	r4,SL_SPRG0(r1)
+	mtsprg	0,r4
+	lwz	r4,SL_SPRG0+4(r1)
+	mtsprg	1,r4
+	lwz	r4,SL_SPRG0+8(r1)
+	mtsprg	2,r4
+	lwz	r4,SL_SPRG0+12(r1)
+	mtsprg	3,r4
+
+	lwz	r4,SL_DBAT0(r1)
+	mtdbatu	0,r4
+	lwz	r4,SL_DBAT0+4(r1)
+	mtdbatl	0,r4
+	lwz	r4,SL_DBAT1(r1)
+	mtdbatu	1,r4
+	lwz	r4,SL_DBAT1+4(r1)
+	mtdbatl	1,r4
+	lwz	r4,SL_DBAT2(r1)
+	mtdbatu	2,r4
+	lwz	r4,SL_DBAT2+4(r1)
+	mtdbatl	2,r4
+	lwz	r4,SL_DBAT3(r1)
+	mtdbatu	3,r4
+	lwz	r4,SL_DBAT3+4(r1)
+	mtdbatl	3,r4
+	lwz	r4,SL_IBAT0(r1)
+	mtibatu	0,r4
+	lwz	r4,SL_IBAT0+4(r1)
+	mtibatl	0,r4
+	lwz	r4,SL_IBAT1(r1)
+	mtibatu	1,r4
+	lwz	r4,SL_IBAT1+4(r1)
+	mtibatl	1,r4
+	lwz	r4,SL_IBAT2(r1)
+	mtibatu	2,r4
+	lwz	r4,SL_IBAT2+4(r1)
+	mtibatl	2,r4
+	lwz	r4,SL_IBAT3(r1)
+	mtibatu	3,r4
+	lwz	r4,SL_IBAT3+4(r1)
+	mtibatl	3,r4
+
+BEGIN_FTR_SECTION
+	li	r4,0
+	mtspr	SPRN_DBAT4U,r4
+	mtspr	SPRN_DBAT4L,r4
+	mtspr	SPRN_DBAT5U,r4
+	mtspr	SPRN_DBAT5L,r4
+	mtspr	SPRN_DBAT6U,r4
+	mtspr	SPRN_DBAT6L,r4
+	mtspr	SPRN_DBAT7U,r4
+	mtspr	SPRN_DBAT7L,r4
+	mtspr	SPRN_IBAT4U,r4
+	mtspr	SPRN_IBAT4L,r4
+	mtspr	SPRN_IBAT5U,r4
+	mtspr	SPRN_IBAT5L,r4
+	mtspr	SPRN_IBAT6U,r4
+	mtspr	SPRN_IBAT6L,r4
+	mtspr	SPRN_IBAT7U,r4
+	mtspr	SPRN_IBAT7L,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+
+	/* Flush all TLBs */
+	lis	r4,0x1000
+1:	addic.	r4,r4,-0x1000
+	tlbie	r4
+	blt	1b
+	sync
+
+	/* restore the MSR and turn on the MMU */
+	lwz	r3,SL_MSR(r1)
+	bl	turn_on_mmu
+
+	/* get back the stack pointer */
+	tovirt(r1,r1)
+
+	/* Restore TB */
+	li	r3,0
+	mttbl	r3
+	lwz	r3,SL_TB(r1)
+	lwz	r4,SL_TB+4(r1)
+	mttbu	r3
+	mttbl	r4
+
+	/* Restore the callee-saved registers and return */
+	lwz	r0,SL_CR(r1)
+	mtcr	r0
+	lwz	r2,SL_R2(r1)
+	lmw	r12,SL_R12(r1)
+	addi	r1,r1,SL_SIZE
+	lwz	r0,4(r1)
+	mtlr	r0
+	blr
+
+turn_on_mmu:
+	mflr	r4
+	tovirt(r4,r4)
+	mtsrr0	r4
+	mtsrr1	r3
+	sync
+	isync
+	rfi
+
+#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */
+
+	.section .data
+	.balign	L1_CACHE_LINE_SIZE
+sleep_storage:
+	.long 0
+	.balign	L1_CACHE_LINE_SIZE, 0
+
+#endif /* CONFIG_6xx */
+	.section .text
diff --git a/arch/powerpc/platforms/powermac/pmac_smp.c b/arch/powerpc/platforms/powermac/pmac_smp.c
new file mode 100644
index 0000000..995e909
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_smp.c
@@ -0,0 +1,716 @@
+/*
+ * SMP support for power macintosh.
+ *
+ * We support both the old "powersurge" SMP architecture
+ * and the current Core99 (G4 PowerMac) machines.
+ *
+ * Note that we don't support the very first rev. of the
+ * Apple/DayStar 2-CPU board, the one with the funky
+ * watchdog. Hopefully, none of these are out there except
+ * maybe internally at Apple. I should probably still add some
+ * code to detect this card and disable SMP. --BenH.
+ *
+ * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
+ * and Ben Herrenschmidt <benh@kernel.crashing.org>.
+ *
+ * Support for DayStar quad CPU cards
+ * Copyright (C) XLR8, Inc. 1994-2000
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/cpu.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/residual.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/time.h>
+#include <asm/open_pic.h>
+#include <asm/cacheflush.h>
+#include <asm/keylargo.h>
+
+/*
+ * Powersurge (old powermac SMP) support.
+ */
+
+extern void __secondary_start_pmac_0(void);
+
+/* Addresses for powersurge registers */
+#define HAMMERHEAD_BASE		0xf8000000
+#define HHEAD_CONFIG		0x90
+#define HHEAD_SEC_INTR		0xc0
+
+/* register for interrupting the primary processor on the powersurge */
+/* N.B. this is actually the ethernet ROM! */
+#define PSURGE_PRI_INTR		0xf3019000
+
+/* register for storing the start address for the secondary processor */
+/* N.B. this is the PCI config space address register for the 1st bridge */
+#define PSURGE_START		0xf2800000
+
+/* Daystar/XLR8 4-CPU card */
+#define PSURGE_QUAD_REG_ADDR	0xf8800000
+
+#define PSURGE_QUAD_IRQ_SET	0
+#define PSURGE_QUAD_IRQ_CLR	1
+#define PSURGE_QUAD_IRQ_PRIMARY	2
+#define PSURGE_QUAD_CKSTOP_CTL	3
+#define PSURGE_QUAD_PRIMARY_ARB	4
+#define PSURGE_QUAD_BOARD_ID	6
+#define PSURGE_QUAD_WHICH_CPU	7
+#define PSURGE_QUAD_CKSTOP_RDBK	8
+#define PSURGE_QUAD_RESET_CTL	11
+
+#define PSURGE_QUAD_OUT(r, v)	(out_8(quad_base + ((r) << 4) + 4, (v)))
+#define PSURGE_QUAD_IN(r)	(in_8(quad_base + ((r) << 4) + 4) & 0x0f)
+#define PSURGE_QUAD_BIS(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
+#define PSURGE_QUAD_BIC(r, v)	(PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
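+/* e.g. PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID) reads the low nibble of
+ * the byte at quad_base + (6 << 4) + 4 = quad_base + 0x64.
+ */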
+
+/* virtual addresses for the above */
+static volatile u8 __iomem *hhead_base;
+static volatile u8 __iomem *quad_base;
+static volatile u32 __iomem *psurge_pri_intr;
+static volatile u8 __iomem *psurge_sec_intr;
+static volatile u32 __iomem *psurge_start;
+
+/* values for psurge_type */
+#define PSURGE_NONE		-1
+#define PSURGE_DUAL		0
+#define PSURGE_QUAD_OKEE	1
+#define PSURGE_QUAD_COTTON	2
+#define PSURGE_QUAD_ICEGRASS	3
+
+/* what sort of powersurge board we have */
+static int psurge_type = PSURGE_NONE;
+
+/* L2 and L3 cache settings to pass from CPU0 to CPU1 */
+static volatile long core99_l2_cache;
+static volatile long core99_l3_cache;
+
+/* Timebase freeze GPIO */
+static unsigned int core99_tb_gpio;
+
+/* Sync flag for HW tb sync */
+static volatile int sec_tb_reset = 0;
+static unsigned int pri_tb_hi, pri_tb_lo;
+static unsigned int pri_tb_stamp;
+
+static void __devinit core99_init_caches(int cpu)
+{
+	if (!cpu_has_feature(CPU_FTR_L2CR))
+		return;
+
+	if (cpu == 0) {
+		core99_l2_cache = _get_L2CR();
+		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
+	} else {
+		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
+		_set_L2CR(0);
+		_set_L2CR(core99_l2_cache);
+		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
+	}
+
+	if (!cpu_has_feature(CPU_FTR_L3CR))
+		return;
+
+	if (cpu == 0){
+		core99_l3_cache = _get_L3CR();
+		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
+	} else {
+		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
+		_set_L3CR(0);
+		_set_L3CR(core99_l3_cache);
+		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
+	}
+}
+
+/*
+ * Set and clear IPIs for powersurge.
+ */
+static inline void psurge_set_ipi(int cpu)
+{
+	if (psurge_type == PSURGE_NONE)
+		return;
+	if (cpu == 0)
+		in_be32(psurge_pri_intr);
+	else if (psurge_type == PSURGE_DUAL)
+		out_8(psurge_sec_intr, 0);
+	else
+		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
+}
+
+static inline void psurge_clr_ipi(int cpu)
+{
+	if (cpu > 0) {
+		switch(psurge_type) {
+		case PSURGE_DUAL:
+			out_8(psurge_sec_intr, ~0);
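+			/* fall through */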
+		case PSURGE_NONE:
+			break;
+		default:
+			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
+		}
+	}
+}
+
+/*
+ * On powersurge (old SMP powermac architecture) we don't have
+ * separate IPIs for separate messages like openpic does.  Instead
+ * we have a bitmap for each processor, where a 1 bit means that
+ * the corresponding message is pending for that processor.
+ * Ideally each cpu's entry would be in a different cache line.
+ *  -- paulus.
+ */
+static unsigned long psurge_smp_message[NR_CPUS];
+
+void psurge_smp_message_recv(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	int msg;
+
+	/* clear interrupt */
+	psurge_clr_ipi(cpu);
+
+	if (num_online_cpus() < 2)
+		return;
+
+	/* make sure there is a message there */
+	for (msg = 0; msg < 4; msg++)
+		if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
+			smp_message_recv(msg, regs);
+}
+
+irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+{
+	psurge_smp_message_recv(regs);
+	return IRQ_HANDLED;
+}
+
+static void smp_psurge_message_pass(int target, int msg, unsigned long data,
+					   int wait)
+{
+	int i;
+
+	if (num_online_cpus() < 2)
+		return;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i))
+			continue;
+		if (target == MSG_ALL
+		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
+		    || target == i) {
+			set_bit(msg, &psurge_smp_message[i]);
+			psurge_set_ipi(i);
+		}
+	}
+}
+
+/*
+ * Determine whether a quad card is present. We read the board ID
+ * register, force the data bus to change to something else, and read
+ * it again. If it's stable, then the register probably exists (ugh !)
+ */
+static int __init psurge_quad_probe(void)
+{
+	int type;
+	unsigned int i;
+
+	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
+	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
+	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+		return PSURGE_DUAL;
+
+	/* looks OK, try a slightly more rigorous test */
+	/* bogus is not necessarily cacheline-aligned,
+	   though I don't suppose that really matters.  -- paulus */
+	for (i = 0; i < 100; i++) {
+		volatile u32 bogus[8];
+		bogus[(0+i)%8] = 0x00000000;
+		bogus[(1+i)%8] = 0x55555555;
+		bogus[(2+i)%8] = 0xFFFFFFFF;
+		bogus[(3+i)%8] = 0xAAAAAAAA;
+		bogus[(4+i)%8] = 0x33333333;
+		bogus[(5+i)%8] = 0xCCCCCCCC;
+		bogus[(6+i)%8] = 0xCCCCCCCC;
+		bogus[(7+i)%8] = 0x33333333;
+		wmb();
+		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
+		mb();
+		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
+			return PSURGE_DUAL;
+	}
+	return type;
+}
+
+static void __init psurge_quad_init(void)
+{
+	int procbits;
+
+	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
+	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
+	if (psurge_type == PSURGE_QUAD_ICEGRASS)
+		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+	else
+		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
+	mdelay(33);
+	out_8(psurge_sec_intr, ~0);
+	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
+	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
+	if (psurge_type != PSURGE_QUAD_ICEGRASS)
+		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
+	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
+	mdelay(33);
+	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
+	mdelay(33);
+	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
+	mdelay(33);
+}
+
+static int __init smp_psurge_probe(void)
+{
+	int i, ncpus;
+
+	/* We don't do SMP on the PPC601 -- paulus */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		return 1;
+
+	/*
+	 * The powersurge cpu board can be used in the generation
+	 * of powermacs that have a socket for an upgradeable cpu card,
+	 * including the 7500, 8500, 9500, 9600.
+	 * The device tree doesn't tell you if you have 2 cpus because
+	 * OF doesn't know anything about the 2nd processor.
+	 * Instead we look for magic bits in magic registers,
+	 * in the hammerhead memory controller in the case of the
+	 * dual-cpu powersurge board.  -- paulus.
+	 */
+	if (find_devices("hammerhead") == NULL)
+		return 1;
+
+	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
+	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
+	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
+
+	psurge_type = psurge_quad_probe();
+	if (psurge_type != PSURGE_DUAL) {
+		psurge_quad_init();
+		/* All released cards using this HW design have 4 CPUs */
+		ncpus = 4;
+	} else {
+		iounmap(quad_base);
+		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
+			/* not a dual-cpu card */
+			iounmap(hhead_base);
+			psurge_type = PSURGE_NONE;
+			return 1;
+		}
+		ncpus = 2;
+	}
+
+	psurge_start = ioremap(PSURGE_START, 4);
+	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
+
+	/* this is not actually strictly necessary -- paulus. */
+	for (i = 1; i < ncpus; ++i)
+		smp_hw_index[i] = i;
+
+	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
+
+	return ncpus;
+}
+
+static void __init smp_psurge_kick_cpu(int nr)
+{
+	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
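+	/* each CPU gets an 8-byte entry stub, hence the nr * 8 */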
+	unsigned long a;
+
+	/* may need to flush here if secondary bats aren't setup */
+	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
+		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
+	asm volatile("sync");
+
+	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+
+	out_be32(psurge_start, start);
+	mb();
+
+	psurge_set_ipi(nr);
+	udelay(10);
+	psurge_clr_ipi(nr);
+
+	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+}
+
+/*
+ * With the dual-cpu powersurge board, the decrementers and timebases
+ * of both cpus are frozen after the secondary cpu is started up,
+ * until we give the secondary cpu another interrupt.  This routine
+ * uses this to get the timebases synchronized.
+ *  -- paulus.
+ */
+static void __init psurge_dual_sync_tb(int cpu_nr)
+{
+	int t;
+
+	set_dec(tb_ticks_per_jiffy);
+	set_tb(0, 0);
+	last_jiffy_stamp(cpu_nr) = 0;
+
+	if (cpu_nr > 0) {
+		mb();
+		sec_tb_reset = 1;
+		return;
+	}
+
+	/* wait for the secondary to have reset its TB before proceeding */
+	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
+		;
+
+	/* now interrupt the secondary, starting both TBs */
+	psurge_set_ipi(1);
+
+	smp_tb_synchronized = 1;
+}
+
+static struct irqaction psurge_irqaction = {
+	.handler = psurge_primary_intr,
+	.flags = SA_INTERRUPT,
+	.mask = CPU_MASK_NONE,
+	.name = "primary IPI",
+};
+
+static void __init smp_psurge_setup_cpu(int cpu_nr)
+{
+
+	if (cpu_nr == 0) {
+		/* If we failed to start the second CPU, we should still
+		 * send it an IPI to start the timebase & DEC or we might
+		 * have them stuck.
+		 */
+		if (num_online_cpus() < 2) {
+			if (psurge_type == PSURGE_DUAL)
+				psurge_set_ipi(1);
+			return;
+		}
+		/* reset the entry point so if we get another intr we won't
+		 * try to startup again */
+		out_be32(psurge_start, 0x100);
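+		/* (0x100 is the PowerPC system reset exception vector) */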
+		if (setup_irq(30, &psurge_irqaction))
+			printk(KERN_ERR "Couldn't get primary IPI interrupt");
+	}
+
+	if (psurge_type == PSURGE_DUAL)
+		psurge_dual_sync_tb(cpu_nr);
+}
+
+void __init smp_psurge_take_timebase(void)
+{
+	/* Dummy implementation */
+}
+
+void __init smp_psurge_give_timebase(void)
+{
+	/* Dummy implementation */
+}
+
+static int __init smp_core99_probe(void)
+{
+#ifdef CONFIG_6xx
+	extern int powersave_nap;
+#endif
+	struct device_node *cpus, *firstcpu;
+	int i, ncpus = 0, boot_cpu = -1;
+	u32 *tbprop = NULL;
+
+	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
+	cpus = firstcpu = find_type_devices("cpu");
+	while(cpus != NULL) {
+		u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
+		char *stateprop = (char *)get_property(cpus, "state", NULL);
+		if (regprop != NULL && stateprop != NULL &&
+		    !strncmp(stateprop, "running", 7))
+			boot_cpu = *regprop;
+		++ncpus;
+		cpus = cpus->next;
+	}
+	if (boot_cpu == -1)
+		printk(KERN_WARNING "Couldn't detect boot CPU !\n");
+	if (boot_cpu != 0)
+		printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);
+
+	if (machine_is_compatible("MacRISC4")) {
+		extern struct smp_ops_t core99_smp_ops;
+
+		core99_smp_ops.take_timebase = smp_generic_take_timebase;
+		core99_smp_ops.give_timebase = smp_generic_give_timebase;
+	} else {
+		if (firstcpu != NULL)
+			tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
+		if (tbprop)
+			core99_tb_gpio = *tbprop;
+		else
+			core99_tb_gpio = KL_GPIO_TB_ENABLE;
+	}
+
+	if (ncpus > 1) {
+		mpic_request_ipis();
+		for (i = 1; i < ncpus; ++i)
+			smp_hw_index[i] = i;
+#ifdef CONFIG_6xx
+		powersave_nap = 0;
+#endif
+		core99_init_caches(0);
+	}
+
+	return ncpus;
+}
+
+static void __devinit smp_core99_kick_cpu(int nr)
+{
+	unsigned long save_vector, new_vector;
+	unsigned long flags;
+
+	volatile unsigned long *vector
+		 = ((volatile unsigned long *)(KERNELBASE+0x100));
+	if (nr < 0 || nr > 3)
+		return;
+	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+	local_irq_save(flags);
+	local_irq_disable();
+
+	/* Save reset vector */
+	save_vector = *vector;
+
+	/* Set up a fake reset vector that does
+	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
+	 */
+	new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
+	*vector = 0x48000002 + new_vector - KERNELBASE;
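+	/* 0x48000002 is an I-form branch with the AA (absolute) bit set,
+	 * i.e. "ba"; subtracting KERNELBASE turns the virtual entry point
+	 * into the physical address the freshly reset CPU branches to.
+	 */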
+
+	/* flush data cache and inval instruction cache */
+	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+	/* Put some life in our friend */
+	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
+
+	/* FIXME: We wait a bit for the CPU to take the exception, I should
+	 * instead wait for the entry code to set something for me. Well,
+	 * ideally, all that crap will be done in prom.c and the CPU left
+	 * in a RAM-based wait loop like CHRP.
+	 */
+	mdelay(1);
+
+	/* Restore our exception vector */
+	*vector = save_vector;
+	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+
+	local_irq_restore(flags);
+	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
+}
+
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
+{
+	/* Setup L2/L3 */
+	if (cpu_nr != 0)
+		core99_init_caches(cpu_nr);
+
+	/* Setup openpic */
+	mpic_setup_this_cpu();
+
+	if (cpu_nr == 0) {
+#ifdef CONFIG_POWER4
+		extern void g5_phy_disable_cpu1(void);
+
+		/* If we didn't start the second CPU, we must take
+		 * it off the bus
+		 */
+		if (machine_is_compatible("MacRISC4") &&
+		    num_online_cpus() < 2)		
+			g5_phy_disable_cpu1();
+#endif /* CONFIG_POWER4 */
+		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+	}
+}
+
+/* not __init, called in sleep/wakeup code */
+void smp_core99_take_timebase(void)
+{
+	unsigned long flags;
+
+	/* tell the primary we're here */
+	sec_tb_reset = 1;
+	mb();
+
+	/* wait for the primary to set pri_tb_hi/lo */
+	while (sec_tb_reset < 2)
+		mb();
+
+	/* set our stuff the same as the primary */
+	local_irq_save(flags);
+	set_dec(1);
+	set_tb(pri_tb_hi, pri_tb_lo);
+	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
+	mb();
+
+	/* tell the primary we're done */
+	sec_tb_reset = 0;
+	mb();
+	local_irq_restore(flags);
+}
+
+/* not __init, called in sleep/wakeup code */
+void smp_core99_give_timebase(void)
+{
+	unsigned long flags;
+	unsigned int t;
+
+	/* wait for the secondary to be in take_timebase */
+	for (t = 100000; t > 0 && !sec_tb_reset; --t)
+		udelay(10);
+	if (!sec_tb_reset) {
+		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
+		return;
+	}
+
+	/* freeze the timebase and read it */
+	/* disable interrupts so the timebase is disabled for the
+	   shortest possible time */
+	local_irq_save(flags);
+	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
+	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
+	mb();
+	pri_tb_hi = get_tbu();
+	pri_tb_lo = get_tbl();
+	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
+	mb();
+
+	/* tell the secondary we're ready */
+	sec_tb_reset = 2;
+	mb();
+
+	/* wait for the secondary to have taken it */
+	for (t = 100000; t > 0 && sec_tb_reset; --t)
+		udelay(10);
+	if (sec_tb_reset)
+		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
+	else
+		smp_tb_synchronized = 1;
+
+	/* Now, restart the timebase by leaving the GPIO as an open collector */
+	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
+	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
+	local_irq_restore(flags);
+}
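+
+/* Net effect: sec_tb_reset acts as a tiny state machine: the
+ * secondary sets it to 1 once parked in take_timebase, the primary
+ * freezes the timebase, publishes pri_tb_hi/lo and sets it to 2, and
+ * the secondary clears it back to 0 after copying the values.
+ */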
+
+void smp_core99_message_pass(int target, int msg, unsigned long data, int wait)
+{
+	cpumask_t mask = CPU_MASK_ALL;
+	/* make sure we're sending something that translates to an IPI */
+	if (msg > 0x3) {
+		printk("SMP %d: smp_message_pass: unknown msg %d\n",
+		       smp_processor_id(), msg);
+		return;
+	}
+	switch (target) {
+	case MSG_ALL:
+		mpic_send_ipi(msg, mask);
+		break;
+	case MSG_ALL_BUT_SELF:
+		cpu_clear(smp_processor_id(), mask);
+		mpic_send_ipi(msg, mask);
+		break;
+	default:
+		mpic_send_ipi(msg, cpumask_of_cpu(target));
+		break;
+	}
+}
+
+
+/* PowerSurge-style Macs */
+struct smp_ops_t psurge_smp_ops = {
+	.message_pass	= smp_psurge_message_pass,
+	.probe		= smp_psurge_probe,
+	.kick_cpu	= smp_psurge_kick_cpu,
+	.setup_cpu	= smp_psurge_setup_cpu,
+	.give_timebase	= smp_psurge_give_timebase,
+	.take_timebase	= smp_psurge_take_timebase,
+};
+
+/* Core99 Macs (dual G4s) */
+struct smp_ops_t core99_smp_ops = {
+	.message_pass	= smp_core99_message_pass,
+	.probe		= smp_core99_probe,
+	.kick_cpu	= smp_core99_kick_cpu,
+	.setup_cpu	= smp_core99_setup_cpu,
+	.give_timebase	= smp_core99_give_timebase,
+	.take_timebase	= smp_core99_take_timebase,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+	cpu_clear(smp_processor_id(), cpu_online_map);
+
+	/* XXX reset cpu affinity here */
+	openpic_set_priority(0xf);
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+	mb();
+	udelay(20);
+	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+	return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
+static int cpu_dead[NR_CPUS];
+
+void cpu_die(void)
+{
+	local_irq_disable();
+	cpu_dead[smp_processor_id()] = 1;
+	mb();
+	low_cpu_die();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	int timeout;
+
+	timeout = 1000;
+	while (!cpu_dead[cpu]) {
+		if (--timeout == 0) {
+			printk("CPU %u refused to die!\n", cpu);
+			break;
+		}
+		msleep(1);
+	}
+	cpu_callin_map[cpu] = 0;
+	cpu_dead[cpu] = 0;
+}
+
+#endif
diff --git a/arch/powerpc/platforms/powermac/pmac_time.c b/arch/powerpc/platforms/powermac/pmac_time.c
new file mode 100644
index 0000000..ff6adff
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac_time.c
@@ -0,0 +1,291 @@
+/*
+ * Support for periodic interrupts (100 per second) and for getting
+ * the current time from the RTC on Power Macintoshes.
+ *
+ * We use the decrementer register for our periodic interrupts.
+ *
+ * Paul Mackerras	August 1996.
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+#include <linux/hardirq.h>
+
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/nvram.h>
+
+/* Apparently the RTC stores seconds since 1 Jan 1904 */
+#define RTC_OFFSET	2082844800
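+/* 66 years from 1904 to 1970, 17 of them leap years:
+ * (66 * 365 + 17) * 86400 = 2082844800 seconds. */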
+
+/*
+ * Calibrate the decrementer frequency with the VIA timer 1.
+ */
+#define VIA_TIMER_FREQ_6	4700000	/* timer 1 frequency * 6 */
+
+/* VIA registers */
+#define RS		0x200		/* skip between registers */
+#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
+#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
+#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
+#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
+#define ACR		(11*RS)		/* Auxiliary control register */
+#define IFR		(13*RS)		/* Interrupt flag register */
+
+/* Bits in ACR */
+#define T1MODE		0xc0		/* Timer 1 mode */
+#define T1MODE_CONT	0x40		/*  continuous interrupts */
+
+/* Bits in IFR and IER */
+#define T1_INT		0x40		/* Timer 1 interrupt */
+
+extern struct timezone sys_tz;
+
+long __init
+pmac_time_init(void)
+{
+#ifdef CONFIG_NVRAM
+	s32 delta = 0;
+	int dst;
+	
+	delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
+	delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
+	delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
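+	/* delta is stored as a signed 24-bit value; sign-extend bit 23 */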
+	if (delta & 0x00800000UL)
+		delta |= 0xFF000000UL;
+	dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
+	printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
+		dst ? "on" : "off");
+	return delta;
+#else
+	return 0;
+#endif
+}
+
+unsigned long
+pmac_get_rtc_time(void)
+{
+#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
+	struct adb_request req;
+	unsigned long now;
+#endif
+
+	/* Get the time from the RTC */
+	switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
+	case SYS_CTRLER_CUDA:
+		if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+			return 0;
+		while (!req.complete)
+			cuda_poll();
+		if (req.reply_len != 7)
+			printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
+			       req.reply_len);
+		now = (req.reply[3] << 24) + (req.reply[4] << 16)
+			+ (req.reply[5] << 8) + req.reply[6];
+		return now - RTC_OFFSET;
+#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_ADB_PMU
+	case SYS_CTRLER_PMU:
+		if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+			return 0;
+		while (!req.complete)
+			pmu_poll();
+		if (req.reply_len != 4)
+			printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
+			       req.reply_len);
+		now = (req.reply[0] << 24) + (req.reply[1] << 16)
+			+ (req.reply[2] << 8) + req.reply[3];
+		return now - RTC_OFFSET;
+#endif /* CONFIG_ADB_PMU */
+	default: ;
+	}
+	return 0;
+}
+
+int
+pmac_set_rtc_time(unsigned long nowtime)
+{
+#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
+	struct adb_request req;
+#endif
+
+	nowtime += RTC_OFFSET;
+
+	switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
+	case SYS_CTRLER_CUDA:
+		if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+				 nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
+			return 0;
+		while (!req.complete)
+			cuda_poll();
+		if ((req.reply_len != 3) && (req.reply_len != 7))
+			printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
+			       req.reply_len);
+		return 1;
+#endif /* CONFIG_ADB_CUDA */
+#ifdef CONFIG_ADB_PMU
+	case SYS_CTRLER_PMU:
+		if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
+				nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
+			return 0;
+		while (!req.complete)
+			pmu_poll();
+		if (req.reply_len != 0)
+			printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
+			       req.reply_len);
+		return 1;
+#endif /* CONFIG_ADB_PMU */
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Calibrate the decrementer register using VIA timer 1.
+ * This is used both on powermacs and CHRP machines.
+ */
+int __init
+via_calibrate_decr(void)
+{
+	struct device_node *vias;
+	volatile unsigned char __iomem *via;
+	int count = VIA_TIMER_FREQ_6 / 100;
+	unsigned int dstart, dend;
+
+	vias = find_devices("via-cuda");
+	if (vias == 0)
+		vias = find_devices("via-pmu");
+	if (vias == 0)
+		vias = find_devices("via");
+	if (vias == 0 || vias->n_addrs == 0)
+		return 0;
+	via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
+
+	/* set timer 1 for continuous interrupts */
+	out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
+	/* set the counter to a small value */
+	out_8(&via[T1CH], 2);
+	/* set the latch to `count' */
+	out_8(&via[T1LL], count);
+	out_8(&via[T1LH], count >> 8);
+	/* wait until it hits 0 */
+	while ((in_8(&via[IFR]) & T1_INT) == 0)
+		;
+	dstart = get_dec();
+	/* clear the interrupt & wait until it hits 0 again */
+	in_8(&via[T1CL]);
+	while ((in_8(&via[IFR]) & T1_INT) == 0)
+		;
+	dend = get_dec();
+
+	tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100));
+	tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);
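+	/* The latch holds VIA_TIMER_FREQ_6/100 counts and timer 1 runs at
+	 * VIA_TIMER_FREQ_6/6 Hz, so each measured period is 6/100 s, i.e.
+	 * 60000 us: hence the divisor 6*(HZ/100) above and the 60000 in
+	 * the tb_to_us scale factor.
+	 */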
+
+	printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",
+	       tb_ticks_per_jiffy, dstart - dend);
+
+	iounmap(via);
+	
+	return 1;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Reset the time after a sleep.
+ */
+static int
+time_sleep_notify(struct pmu_sleep_notifier *self, int when)
+{
+	static unsigned long time_diff;
+	unsigned long flags;
+	unsigned long seq;
+
+	switch (when) {
+	case PBOOK_SLEEP_NOW:
+		do {
+			seq = read_seqbegin_irqsave(&xtime_lock, flags);
+			time_diff = xtime.tv_sec - pmac_get_rtc_time();
+		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+		break;
+	case PBOOK_WAKE:
+		write_seqlock_irqsave(&xtime_lock, flags);
+		xtime.tv_sec = pmac_get_rtc_time() + time_diff;
+		xtime.tv_nsec = 0;
+		last_rtc_update = xtime.tv_sec;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		break;
+	}
+	return PBOOK_SLEEP_OK;
+}
+
+static struct pmu_sleep_notifier time_sleep_notifier = {
+	time_sleep_notify, SLEEP_LEVEL_MISC,
+};
+#endif /* CONFIG_PM */
+
+/*
+ * Query the OF and get the decr frequency.
+ * This was taken from the pmac time_init() when merging the prep/pmac
+ * time functions.
+ */
+void __init
+pmac_calibrate_decr(void)
+{
+	struct device_node *cpu;
+	unsigned int freq, *fp;
+
+#ifdef CONFIG_PM
+	pmu_register_sleep_notifier(&time_sleep_notifier);
+#endif /* CONFIG_PM */
+
+	/* We assume MacRISC2 machines have correct device-tree
+	 * calibration. That's better since the VIA itself seems
+	 * to be slightly off. --BenH
+	 */
+	if (!machine_is_compatible("MacRISC2") &&
+	    !machine_is_compatible("MacRISC3") &&
+	    !machine_is_compatible("MacRISC4"))
+		if (via_calibrate_decr())
+			return;
+
+	/* Special case: QuickSilver G4s seem to have a badly calibrated
+	 * timebase-frequency in OF, VIA is much better on these. We should
+	 * probably implement calibration based on the KL timer on these
+	 * machines anyway... -BenH
+	 */
+	if (machine_is_compatible("PowerMac3,5"))
+		if (via_calibrate_decr())
+			return;
+	/*
+	 * The cpu node should have a timebase-frequency property
+	 * to tell us the rate at which the decrementer counts.
+	 */
+	cpu = find_type_devices("cpu");
+	if (cpu == 0)
+		panic("can't find cpu node in time_init");
+	fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
+	if (fp == 0)
+		panic("can't get cpu timebase frequency");
+	freq = *fp;
+	printk("time_init: decrementer frequency = %u.%.6u MHz\n",
+	       freq/1000000, freq%1000000);
+	tb_ticks_per_jiffy = freq / HZ;
+	tb_to_us = mulhwu_scale_factor(freq, 1000000);
+}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
new file mode 100644
index 0000000..673ac47
--- /dev/null
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -0,0 +1,22 @@
+
+config PREP_RESIDUAL
+	bool "Support for PReP Residual Data"
+	depends on PPC_PREP
+	help
+	  Some PReP systems have residual data passed to the kernel by the
+	  firmware.  This allows detection of memory size, devices present and
+	  other useful pieces of information.  Sometimes this information is
+	  not present or incorrect, in which case it could lead to the machine 
+	  behaving incorrectly.  If this happens, either disable PREP_RESIDUAL
+	  or pass the 'noresidual' option to the kernel.
+
+	  If you are running a PReP system, say Y here, otherwise say N.
+
+config PROC_PREPRESIDUAL
+	bool "Support for reading of PReP Residual Data in /proc"
+	depends on PREP_RESIDUAL && PROC_FS
+	help
+	  Enabling this option will create a /proc/residual file which allows
+	  you to get at the residual data on PReP systems.  You will need a tool
+	  (lsresidual) to parse it.  If you aren't on a PReP system, you don't
+	  want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
new file mode 100644
index 0000000..7a3b6fc
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -0,0 +1,47 @@
+
+config PPC_SPLPAR
+	depends on PPC_PSERIES
+	bool "Support for shared-processor logical partitions"
+	default n
+	help
+	  Enabling this option will make the kernel run more efficiently
+	  on logically-partitioned pSeries systems which use shared
+	  processors, that is, which share physical processors between
+	  two or more partitions.
+
+config HMT
+	bool "Hardware multithreading"
+	depends on SMP && PPC_PSERIES && BROKEN
+	help
+	  This option enables hardware multithreading on RS64 cpus.
+	  pSeries systems p620 and p660 have such a cpu type.
+
+config EEH
+	bool "PCI Extended Error Handling (EEH)" if EMBEDDED
+	depends on PPC_PSERIES
+	default y if !EMBEDDED
+
+config PPC_RTAS
+	bool
+	depends on PPC_PSERIES || PPC_BPA
+	default y
+
+config RTAS_PROC
+	bool "Proc interface to RTAS"
+	depends on PPC_RTAS
+	default y
+
+config RTAS_FLASH
+	tristate "Firmware flash interface"
+	depends on PPC64 && RTAS_PROC
+
+config SCANLOG
+	tristate "Scanlog dump interface"
+	depends on RTAS_PROC && PPC_PSERIES
+
+config LPARCFG
+	tristate "LPAR Configuration Data"
+	depends on PPC_PSERIES || PPC_ISERIES
+	help
+	  Provide system capacity information via human readable
+	  <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
new file mode 100644
index 0000000..26bdcd9
--- /dev/null
+++ b/arch/powerpc/sysdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MPIC)	+= mpic.o
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
new file mode 100644
index 0000000..c660e7d
--- /dev/null
+++ b/arch/powerpc/sysdev/mpic.c
@@ -0,0 +1,904 @@
+/*
+ *  arch/powerpc/sysdev/mpic.c
+ *
+ *  Driver for interrupt controllers following the OpenPIC standard, the
+ *  common implementation being IBM's MPIC. This driver can also deal
+ *  with various broken implementations of this HW.
+ *
+ *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License.  See the file COPYING in the main directory of this archive
+ *  for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/mpic.h>
+#include <asm/smp.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static struct mpic *mpics;
+static struct mpic *mpic_primary;
+static DEFINE_SPINLOCK(mpic_lock);
+
+
+/*
+ * Register accessor functions
+ */
+
+
+static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
+			    unsigned int reg)
+{
+	if (be)
+		return in_be32(base + (reg >> 2));
+	else
+		return in_le32(base + (reg >> 2));
+}
+
+static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
+			      unsigned int reg, u32 value)
+{
+	if (be)
+		out_be32(base + (reg >> 2), value);
+	else
+		out_le32(base + (reg >> 2), value);
+}
+
+static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
+{
+	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
+	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
+
+	if (mpic->flags & MPIC_BROKEN_IPI)
+		be = !be;
+	return _mpic_read(be, mpic->gregs, offset);
+}
+
+static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
+{
+	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
+
+	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
+}
+
+static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
+{
+	unsigned int cpu = 0;
+
+	if (mpic->flags & MPIC_PRIMARY)
+		cpu = hard_smp_processor_id();
+
+	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
+}
+
+static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
+{
+	unsigned int cpu = 0;
+
+	if (mpic->flags & MPIC_PRIMARY)
+		cpu = hard_smp_processor_id();
+
+	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
+}
+
+static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
+{
+	unsigned int	isu = src_no >> mpic->isu_shift;
+	unsigned int	idx = src_no & mpic->isu_mask;
+
+	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+			  reg + (idx * MPIC_IRQ_STRIDE));
+}
+
+static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
+				   unsigned int reg, u32 value)
+{
+	unsigned int	isu = src_no >> mpic->isu_shift;
+	unsigned int	idx = src_no & mpic->isu_mask;
+
+	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+		    reg + (idx * MPIC_IRQ_STRIDE), value);
+}
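+
+/* For example, with isu_shift = 4 (so isu_mask = 0xf), source number
+ * 0x13 is entry 3 of ISU 1, i.e. register "reg" at offset
+ * 3 * MPIC_IRQ_STRIDE within that ISU's register block.
+ */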
+
+#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
+#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
+#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
+#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
+#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
+#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
+#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
+#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
+
+
+/*
+ * Low level utility functions
+ */
+
+
+
+/* Check if we have one of those nice broken MPICs with a flipped endian on
+ * reads from IPI registers
+ */
+static void __init mpic_test_broken_ipi(struct mpic *mpic)
+{
+	u32 r;
+
+	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
+	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
+
+	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
+		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
+		mpic->flags |= MPIC_BROKEN_IPI;
+	}
+}
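+
+/* With MPIC_VECPRI_MASK being the single mask bit 0x80000000, a
+ * byte-reversed controller hands back 0x00000080 on the read, which
+ * is exactly le32_to_cpu() of the mask on a big-endian host; hence
+ * the comparison above.
+ */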
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+
+/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
+ * to force the edge setting on the MPIC and do the ack workaround.
+ */
+static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
+{
+	if (source_no >= 128 || !mpic->fixups)
+		return 0;
+	return mpic->fixups[source_no].base != NULL;
+}
+
+static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
+{
+	struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
+	u32 tmp;
+
+	spin_lock(&mpic->fixup_lock);
+	writeb(0x11 + 2 * fixup->irq, fixup->base);
+	tmp = readl(fixup->base + 2);
+	writel(tmp | 0x80000000ul, fixup->base + 2);
+	/* config writes shouldn't be posted but let's be safe ... */
+	(void)readl(fixup->base + 2);
+	spin_unlock(&mpic->fixup_lock);
+}
+
+
+static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
+{
+	int i, irq;
+	u32 tmp;
+
+	printk(KERN_INFO "mpic:    - Workarounds on AMD 8111 @ %p\n", devbase);
+
+	for (i=0; i < 24; i++) {
+		writeb(0x10 + 2*i, devbase + 0xf2);
+		tmp = readl(devbase + 0xf4);
+		if ((tmp & 0x1) || !(tmp & 0x20))
+			continue;
+		irq = (tmp >> 16) & 0xff;
+		mpic->fixups[irq].irq = i;
+		mpic->fixups[irq].base = devbase + 0xf2;
+	}
+}
+ 
+static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
+{
+	int i, irq;
+	u32 tmp;
+
+	printk(KERN_INFO "mpic:    - Workarounds on AMD 8131 @ %p\n", devbase);
+
+	for (i=0; i < 4; i++) {
+		writeb(0x10 + 2*i, devbase + 0xba);
+		tmp = readl(devbase + 0xbc);
+		if ((tmp & 0x1) || !(tmp & 0x20))
+			continue;
+		irq = (tmp >> 16) & 0xff;
+		mpic->fixups[irq].irq = i;
+		mpic->fixups[irq].base = devbase + 0xba;
+	}
+}
+ 
+static void __init mpic_scan_ioapics(struct mpic *mpic)
+{
+	unsigned int devfn;
+	u8 __iomem *cfgspace;
+
+	printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");
+
+	/* Allocate fixups array */
+	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
+	BUG_ON(mpic->fixups == NULL);
+	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
+
+	/* Init spinlock */
+	spin_lock_init(&mpic->fixup_lock);
+
+	/* Map u3 config space. We assume all IO-APICs are on the primary bus
+	 * and that no slot is above 0xf, so we only need to map 32k
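+	 * (0x10 devices x 8 functions x 256 bytes of config space = 32k)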
+	 */
+	cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
+	BUG_ON(cfgspace == NULL);
+
+	/* Now we scan all slots. We do a very quick scan: we read only the
+	 * header type, vendor ID and device ID, which is plenty.
+	 */
+	for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
+		u8 __iomem *devbase = cfgspace + (devfn << 8);
+		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
+		u32 l = readl(devbase + PCI_VENDOR_ID);
+		u16 vendor_id, device_id;
+		int multifunc = 0;
+
+		DBG("devfn %x, l: %x\n", devfn, l);
+
+		/* If no device, skip */
+		if (l == 0xffffffff || l == 0x00000000 ||
+		    l == 0x0000ffff || l == 0xffff0000)
+			goto next;
+
+		/* Check if it's a multifunction device (only really
+		 * matters for function 0, though)
+		 */
+		multifunc = !!(hdr_type & 0x80);
+		vendor_id = l & 0xffff;
+		device_id = (l >> 16) & 0xffff;
+
+		/* If a known device, go to fixup setup code */
+		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
+			mpic_amd8111_read_irq(mpic, devbase);
+		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
+			mpic_amd8131_read_irq(mpic, devbase);
+	next:
+		/* next device, if function 0 */
+		if ((PCI_FUNC(devfn) == 0) && !multifunc)
+			devfn += 7;
+	}
+}
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+
+/* Find an mpic associated with a given linux interrupt */
+static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
+{
+	struct mpic *mpic = mpics;
+
+	while(mpic) {
+		/* search IPIs first since they may override the main interrupts */
+		if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
+			if (is_ipi)
+				*is_ipi = 1;
+			return mpic;
+		}
+		if (irq >= mpic->irq_offset &&
+		    irq < (mpic->irq_offset + mpic->irq_count)) {
+			if (is_ipi)
+				*is_ipi = 0;
+			return mpic;
+		}
+		mpic = mpic -> next;
+	}
+	return NULL;
+}
+
+/* Convert a cpu mask from logical to physical cpu numbers. */
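+/* e.g. a logical mask of 0b11 on a machine whose logical cpus 0 and 1
+ * have hard ids 0 and 2 becomes the physical mask 0b101. */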
+static inline u32 mpic_physmask(u32 cpumask)
+{
+	int i;
+	u32 mask = 0;
+
+	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
+	return mask;
+}
+
+#ifdef CONFIG_SMP
+/* Get the mpic structure from the IPI number */
+static inline struct mpic * mpic_from_ipi(unsigned int ipi)
+{
+	return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
+}
+#endif
+
+/* Get the mpic structure from the irq number */
+static inline struct mpic * mpic_from_irq(unsigned int irq)
+{
+	return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
+}
+
+/* Send an EOI */
+static inline void mpic_eoi(struct mpic *mpic)
+{
+	mpic_cpu_write(MPIC_CPU_EOI, 0);
+	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct mpic *mpic = dev_id;
+
+	smp_message_recv(irq - mpic->ipi_offset, regs);
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Linux descriptor level callbacks
+ */
+
+
+static void mpic_enable_irq(unsigned int irq)
+{
+	unsigned int loops = 100000;
+	struct mpic *mpic = mpic_from_irq(irq);
+	unsigned int src = irq - mpic->irq_offset;
+
+	DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src);
+
+	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
+		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
+
+	/* make sure the unmask reaches the controller before we return */
+	do {
+		if (!loops--) {
+			printk(KERN_ERR "mpic_enable_irq timeout\n");
+			break;
+		}
+	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+}
+
+static void mpic_disable_irq(unsigned int irq)
+{
+	unsigned int loops = 100000;
+	struct mpic *mpic = mpic_from_irq(irq);
+	unsigned int src = irq - mpic->irq_offset;
+
+	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
+
+	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
+		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK);
+
+	/* make sure the mask reaches the controller before we return */
+	do {
+		if (!loops--) {
+			printk(KERN_ERR "mpic_disable_irq timeout\n");
+			break;
+		}
+	} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
+}
+
+static void mpic_end_irq(unsigned int irq)
+{
+	struct mpic *mpic = mpic_from_irq(irq);
+
+	DBG("%s: end_irq: %d\n", mpic->name, irq);
+
+	/* We always EOI on end_irq() even for edge interrupts, since that
+	 * should only lower the priority; the MPIC should have properly
+	 * latched any other incoming edge interrupt anyway
+	 */
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+	if (mpic->flags & MPIC_BROKEN_U3) {
+		unsigned int src = irq - mpic->irq_offset;
+		if (mpic_is_ht_interrupt(mpic, src))
+			mpic_apic_end_irq(mpic, src);
+	}
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+	mpic_eoi(mpic);
+}
+
+#ifdef CONFIG_SMP
+
+static void mpic_enable_ipi(unsigned int irq)
+{
+	struct mpic *mpic = mpic_from_ipi(irq);
+	unsigned int src = irq - mpic->ipi_offset;
+
+	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
+	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
+}
+
+static void mpic_disable_ipi(unsigned int irq)
+{
+	/* NEVER disable an IPI... that's just plain wrong! */
+}
+
+static void mpic_end_ipi(unsigned int irq)
+{
+	struct mpic *mpic = mpic_from_ipi(irq);
+
+	/*
+	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
+	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
+	 * applying to them. We EOI them late to avoid re-entering.
+	 * We mark IPIs with SA_INTERRUPT as they must run with
+	 * irqs disabled.
+	 */
+	mpic_eoi(mpic);
+}
+
+#endif /* CONFIG_SMP */
+
+static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+	struct mpic *mpic = mpic_from_irq(irq);
+
+	cpumask_t tmp;
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+
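+	/* The destination register wants physical CPU numbers, hence
+	 * the logical-to-physical conversion through mpic_physmask()
+	 */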
+	mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
+		       mpic_physmask(cpus_addr(tmp)[0]));	
+}
+
+
+/*
+ * Exported functions
+ */
+
+
+struct mpic * __init mpic_alloc(unsigned long phys_addr,
+				unsigned int flags,
+				unsigned int isu_size,
+				unsigned int irq_offset,
+				unsigned int irq_count,
+				unsigned int ipi_offset,
+				unsigned char *senses,
+				unsigned int senses_count,
+				const char *name)
+{
+	struct mpic	*mpic;
+	u32		reg;
+	const char	*vers;
+	int		i;
+
+	mpic = alloc_bootmem(sizeof(struct mpic));
+	if (mpic == NULL)
+		return NULL;
+
+	memset(mpic, 0, sizeof(struct mpic));
+	mpic->name = name;
+
+	mpic->hc_irq.typename = name;
+	mpic->hc_irq.enable = mpic_enable_irq;
+	mpic->hc_irq.disable = mpic_disable_irq;
+	mpic->hc_irq.end = mpic_end_irq;
+	if (flags & MPIC_PRIMARY)
+		mpic->hc_irq.set_affinity = mpic_set_affinity;
+#ifdef CONFIG_SMP
+	mpic->hc_ipi.typename = name;
+	mpic->hc_ipi.enable = mpic_enable_ipi;
+	mpic->hc_ipi.disable = mpic_disable_ipi;
+	mpic->hc_ipi.end = mpic_end_ipi;
+#endif /* CONFIG_SMP */
+
+	mpic->flags = flags;
+	mpic->isu_size = isu_size;
+	mpic->irq_offset = irq_offset;
+	mpic->irq_count = irq_count;
+	mpic->ipi_offset = ipi_offset;
+	mpic->num_sources = 0; /* so far */
+	mpic->senses = senses;
+	mpic->senses_count = senses_count;
+
+	/* Map the global registers */
+	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
+	mpic->tmregs = mpic->gregs + (MPIC_TIMER_BASE >> 2);
+	BUG_ON(mpic->gregs == NULL);
+
+	/* Reset */
+	if (flags & MPIC_WANTS_RESET) {
+		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
+			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+			   | MPIC_GREG_GCONF_RESET);
+		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+		       & MPIC_GREG_GCONF_RESET)
+			mb();
+	}
+
+	/* Read feature register, calculate num CPUs and, for non-ISU
+	 * MPICs, num sources as well. On ISU MPICs, sources are counted
+	 * as ISUs are added
+	 */
+	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
+	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
+			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
+	if (isu_size == 0)
+		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
+				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
+
+	/* Map the per-CPU registers */
+	for (i = 0; i < mpic->num_cpus; i++) {
+		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
+					   i * MPIC_CPU_STRIDE, 0x1000);
+		BUG_ON(mpic->cpuregs[i] == NULL);
+	}
+
+	/* Initialize main ISU if none provided */
+	if (mpic->isu_size == 0) {
+		mpic->isu_size = mpic->num_sources;
+		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
+					MPIC_IRQ_STRIDE * mpic->isu_size);
+		BUG_ON(mpic->isus[0] == NULL);
+	}
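+	/* Round the ISU size up to a power of two so a source number can
+	 * be split into an ISU index and an offset within the ISU with a
+	 * simple shift and mask
+	 */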
+	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
+	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
+
+	/* Display version */
+	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
+	case 1:
+		vers = "1.0";
+		break;
+	case 2:
+		vers = "1.2";
+		break;
+	case 3:
+		vers = "1.3";
+		break;
+	default:
+		vers = "<unknown>";
+		break;
+	}
+	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
+	       name, vers, phys_addr, mpic->num_cpus);
+	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
+	       mpic->isu_shift, mpic->isu_mask);
+
+	mpic->next = mpics;
+	mpics = mpic;
+
+	if (flags & MPIC_PRIMARY)
+		mpic_primary = mpic;
+
+	return mpic;
+}
+
+void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+			    unsigned long phys_addr)
+{
+	unsigned int isu_first = isu_num * mpic->isu_size;
+
+	BUG_ON(isu_num >= MPIC_MAX_ISU);
+
+	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
+	if ((isu_first + mpic->isu_size) > mpic->num_sources)
+		mpic->num_sources = isu_first + mpic->isu_size;
+}
+
+void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
+			       void *data)
+{
+	struct mpic *mpic = mpic_find(irq, NULL);
+	unsigned long flags;
+
+	/* Synchronization here is a bit dodgy, so don't try to replace cascade
+	 * interrupts on the fly too often ... but normally it's set up at boot.
+	 */
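+	/* Ordering matters here: clear the handler first so readers stop
+	 * cascading, update the vector and data, then publish the new
+	 * handler; the wmb()s keep mpic_get_one_irq() from seeing a new
+	 * handler with stale vector or data
+	 */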
+	spin_lock_irqsave(&mpic_lock, flags);
+	if (mpic->cascade)
+		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
+	mpic->cascade = NULL;
+	wmb();
+	mpic->cascade_vec = irq - mpic->irq_offset;
+	mpic->cascade_data = data;
+	wmb();
+	mpic->cascade = handler;
+	mpic_enable_irq(irq);
+	spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+void __init mpic_init(struct mpic *mpic)
+{
+	int i;
+
+	BUG_ON(mpic->num_sources == 0);
+
+	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
+
+	/* Set current processor priority to max */
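+	/* (0xf masks everything at this CPU during setup; the priority
+	 *  is dropped back to 0 at the end of this function)
+	 */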
+	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
+
+	/* Initialize timers: just disable them all */
+	for (i = 0; i < 4; i++) {
+		mpic_write(mpic->tmregs,
+			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
+		mpic_write(mpic->tmregs,
+			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
+			   MPIC_VECPRI_MASK |
+			   (MPIC_VEC_TIMER_0 + i));
+	}
+
+	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
+	mpic_test_broken_ipi(mpic);
+	for (i = 0; i < 4; i++) {
+		mpic_ipi_write(i,
+			       MPIC_VECPRI_MASK |
+			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
+			       (MPIC_VEC_IPI_0 + i));
+#ifdef CONFIG_SMP
+		if (!(mpic->flags & MPIC_PRIMARY))
+			continue;
+		irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
+		irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
+
+#endif /* CONFIG_SMP */
+	}
+
+	/* Initialize interrupt sources */
+	if (mpic->irq_count == 0)
+		mpic->irq_count = mpic->num_sources;
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+	/* Do the ioapic fixups on U3 broken mpic */
+	DBG("MPIC flags: %x\n", mpic->flags);
+	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
+		mpic_scan_ioapics(mpic);
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+	for (i = 0; i < mpic->num_sources; i++) {
+		/* start with vector = source number, and masked */
+		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
+		int level = 0;
+
+		/* if it's an IPI, we skip it */
+		if ((mpic->irq_offset + i) >= mpic->ipi_offset &&
+		    (mpic->irq_offset + i) < (mpic->ipi_offset + 4))
+			continue;
+
+		/* do senses munging */
+		if (mpic->senses && i < mpic->senses_count) {
+			if (mpic->senses[i] & IRQ_SENSE_LEVEL)
+				vecpri |= MPIC_VECPRI_SENSE_LEVEL;
+			if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
+				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
+		} else
+			vecpri |= MPIC_VECPRI_SENSE_LEVEL;
+
+		/* remember if it was a level interrupt */
+		level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
+
+		/* deal with broken U3 */
+		if (mpic->flags & MPIC_BROKEN_U3) {
+#ifdef CONFIG_MPIC_BROKEN_U3
+			if (mpic_is_ht_interrupt(mpic, i)) {
+				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
+					    MPIC_VECPRI_POLARITY_MASK);
+				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
+			}
+#else
+			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
+#endif
+		}
+
+		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
+		    (level != 0));
+
+		/* init hw */
+		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
+		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+			       1 << hard_smp_processor_id());
+
+		/* init linux descriptors */
+		if (i < mpic->irq_count) {
+			irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
+			irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
+		}
+	}
+
+	/* Init spurious vector */
+	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
+
+	/* Disable 8259 passthrough */
+	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
+		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);
+
+	/* Set current processor priority to 0 */
+	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
+}
+
+
+
+void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
+{
+	int is_ipi;
+	struct mpic *mpic = mpic_find(irq, &is_ipi);
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&mpic_lock, flags);
+	if (is_ipi) {
+		reg = mpic_ipi_read(irq - mpic->ipi_offset)
+			& ~MPIC_VECPRI_PRIORITY_MASK;
+		mpic_ipi_write(irq - mpic->ipi_offset,
+			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
+	} else {
+		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
+			& ~MPIC_VECPRI_PRIORITY_MASK;
+		mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
+			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
+	}
+	spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+unsigned int mpic_irq_get_priority(unsigned int irq)
+{
+	int is_ipi;
+	struct mpic *mpic = mpic_find(irq, &is_ipi);
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&mpic_lock, flags);
+	if (is_ipi)
+		reg = mpic_ipi_read(irq - mpic->ipi_offset);
+	else
+		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
+	spin_unlock_irqrestore(&mpic_lock, flags);
+	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
+}
+
+void mpic_setup_this_cpu(void)
+{
+#ifdef CONFIG_SMP
+	struct mpic *mpic = mpic_primary;
+	unsigned long flags;
+	u32 msk = 1 << hard_smp_processor_id();
+	unsigned int i;
+
+	BUG_ON(mpic == NULL);
+
+	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
+
+	spin_lock_irqsave(&mpic_lock, flags);
+
+	/* Let the mpic know we want intrs. Default affinity is 0xffffffff
+	 * until changed via /proc. That's how it's done on x86. If we want
+	 * it differently, then we should make sure we also change the default
+	 * values of irq_affinity in irq.c.
+	 */
+	if (distribute_irqs) {
+		for (i = 0; i < mpic->num_sources; i++)
+			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+				mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
+	}
+
+	/* Set current processor priority to 0 */
+	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
+
+	spin_unlock_irqrestore(&mpic_lock, flags);
+#endif /* CONFIG_SMP */
+}
+
+int mpic_cpu_get_priority(void)
+{
+	struct mpic *mpic = mpic_primary;
+
+	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
+}
+
+void mpic_cpu_set_priority(int prio)
+{
+	struct mpic *mpic = mpic_primary;
+
+	prio &= MPIC_CPU_TASKPRI_MASK;
+	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
+}
+
+/*
+ * XXX: someone who knows mpic should check this.
+ * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
+ * or can we reset the mpic in the new kernel?
+ */
+void mpic_teardown_this_cpu(int secondary)
+{
+	struct mpic *mpic = mpic_primary;
+	unsigned long flags;
+	u32 msk = 1 << hard_smp_processor_id();
+	unsigned int i;
+
+	BUG_ON(mpic == NULL);
+
+	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
+	spin_lock_irqsave(&mpic_lock, flags);
+
+	/* let the mpic know we don't want intrs.  */
+	for (i = 0; i < mpic->num_sources; i++)
+		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+			mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
+
+	/* Set current processor priority to max */
+	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
+
+	spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+
+void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
+{
+	struct mpic *mpic = mpic_primary;
+
+	BUG_ON(mpic == NULL);
+
+	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+
+	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
+		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
+}
+
+int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
+{
+	u32 irq;
+
+	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
+	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
+
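+	/* The INTACK value is the vector programmed in mpic_init():
+	 * plain sources use their source number as vector, so anything
+	 * below MPIC_VEC_IPI_0 maps back through irq_offset, while IPI
+	 * vectors map through ipi_offset
+	 */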
+	if (mpic->cascade && irq == mpic->cascade_vec) {
+		DBG("%s: cascading ...\n", mpic->name);
+		irq = mpic->cascade(regs, mpic->cascade_data);
+		mpic_eoi(mpic);
+		return irq;
+	}
+	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
+		return -1;
+	if (irq < MPIC_VEC_IPI_0)
+		return irq + mpic->irq_offset;
+	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
+	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
+}
+
+int mpic_get_irq(struct pt_regs *regs)
+{
+	struct mpic *mpic = mpic_primary;
+
+	BUG_ON(mpic == NULL);
+
+	return mpic_get_one_irq(mpic, regs);
+}
+
+
+#ifdef CONFIG_SMP
+void mpic_request_ipis(void)
+{
+	struct mpic *mpic = mpic_primary;
+
+	BUG_ON(mpic == NULL);
+
+	printk("requesting IPIs ...\n");
+
+	/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
+	request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
+		    "IPI0 (call function)", mpic);
+	request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
+		    "IPI1 (reschedule)", mpic);
+	request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
+		    "IPI2 (unused)", mpic);
+	request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
+		    "IPI3 (debugger break)", mpic);
+
+	printk("IPIs requested... \n");
+}
+#endif /* CONFIG_SMP */
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 776941c..ed9c972 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -747,12 +747,12 @@
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool
+	bool "  Common Hardware Reference Platform (CHRP) based machines"
 	depends on PPC_MULTIPLATFORM
 	default y
 
 config PPC_PMAC
-	bool
+	bool "  Apple PowerMac based machines"
 	depends on PPC_MULTIPLATFORM
 	default y
 
@@ -762,7 +762,7 @@
 	default y
 
 config PPC_PREP
-	bool
+	bool "  PowerPC Reference Platform (PReP) based machines"
 	depends on PPC_MULTIPLATFORM
 	default y
 
@@ -1368,7 +1368,7 @@
 
 source "lib/Kconfig"
 
-source "arch/ppc/oprofile/Kconfig"
+source "arch/powerpc/oprofile/Kconfig"
 
 source "arch/ppc/Kconfig.debug"
 
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 16e2675..90c7502 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -71,7 +71,7 @@
 drivers-$(CONFIG_4xx)		+= arch/ppc/4xx_io/
 drivers-$(CONFIG_CPM2)		+= arch/ppc/8260_io/
 
-drivers-$(CONFIG_OPROFILE)	+= arch/ppc/oprofile/
+drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
 
 BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
 
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b1457a8..467f964 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+ifneq ($(CONFIG_PPC_MERGE),y)
 
 extra-$(CONFIG_PPC_STD_MMU)	:= head.o
 extra-$(CONFIG_40x)		:= head_4xx.o
@@ -35,3 +36,25 @@
 obj-$(CONFIG_8xx)		+= softemu8xx.o
 endif
 
+# These are here while we do the architecture merge
+vecemu-y			+= ../../powerpc/kernel/vecemu.o
+
+else
+obj-y				:= entry.o irq.o idle.o time.o misc.o \
+					signal.o ptrace.o align.o \
+					syscalls.o setup.o \
+					cputable.o perfmon.o
+obj-$(CONFIG_6xx)		+= l2cr.o cpu_setup_6xx.o
+obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
+obj-$(CONFIG_POWER4)		+= cpu_setup_power4.o
+obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-mapping.o
+obj-$(CONFIG_PCI)		+= pci.o
+obj-$(CONFIG_KGDB)		+= ppc-stub.o
+obj-$(CONFIG_SMP)		+= smp.o smp-tbsync.o
+obj-$(CONFIG_TAU)		+= temp.o
+ifndef CONFIG_E200
+obj-$(CONFIG_FSL_BOOKE)		+= perfmon_fsl_booke.o
+endif
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+endif
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index ba39643..a5333c0 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -17,8 +17,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
-_GLOBAL(__setup_cpu_601)
-	blr
 _GLOBAL(__setup_cpu_603)
 	b	setup_common_caches
 _GLOBAL(__setup_cpu_604)
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
index 7e4fbb6..0abb5f25 100644
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ b/arch/ppc/kernel/cpu_setup_power4.S
@@ -63,8 +63,6 @@
 	isync
 	blr
 
-_GLOBAL(__setup_cpu_power4)
-	blr
 _GLOBAL(__setup_cpu_ppc970)
 	mfspr	r0,SPRN_HID0
 	li	r11,5			/* clear DOZE and SLEEP */
diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c
index 546e1ea..207d4dd 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/ppc/kernel/cputable.c
@@ -14,23 +14,22 @@
 #include <linux/sched.h>
 #include <linux/threads.h>
 #include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
 
-struct cpu_spec* cur_cpu_spec[NR_CPUS];
+struct cpu_spec* cur_cpu_spec = NULL;
 
-extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
+extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 
 #define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
 		     !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
@@ -42,17 +41,6 @@
 #define COMMON_PPC	(PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
 			 PPC_FEATURE_HAS_MMU)
 
-/* We only set the altivec features if the kernel was compiled with altivec
- * support
- */
-#ifdef CONFIG_ALTIVEC
-#define CPU_FTR_ALTIVEC_COMP		CPU_FTR_ALTIVEC
-#define PPC_FEATURE_ALTIVEC_COMP    	PPC_FEATURE_HAS_ALTIVEC
-#else
-#define CPU_FTR_ALTIVEC_COMP		0
-#define PPC_FEATURE_ALTIVEC_COMP       	0
-#endif
-
 /* We only set the spe features if the kernel was compiled with
  * spe support
  */
@@ -62,47 +50,23 @@
 #define PPC_FEATURE_SPE_COMP       	0
 #endif
 
-/* We need to mark all pages as being coherent if we're SMP or we
- * have a 74[45]x and an MPC107 host bridge.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
-#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
-#else
-#define CPU_FTR_COMMON                  0
-#endif
-
-/* The powersave features NAP & DOZE seems to confuse BDI when
-   debugging. So if a BDI is used, disable theses
- */
-#ifndef CONFIG_BDI_SWITCH
-#define CPU_FTR_MAYBE_CAN_DOZE	CPU_FTR_CAN_DOZE
-#define CPU_FTR_MAYBE_CAN_NAP	CPU_FTR_CAN_NAP
-#else
-#define CPU_FTR_MAYBE_CAN_DOZE	0
-#define CPU_FTR_MAYBE_CAN_NAP	0
-#endif
-
 struct cpu_spec	cpu_specs[] = {
 #if CLASSIC_PPC
 	{ 	/* 601 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00010000,
 		.cpu_name		= "601",
-		.cpu_features		= CPU_FTR_COMMON | CPU_FTR_601 |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_PPC601,
 		.cpu_user_features 	= COMMON_PPC | PPC_FEATURE_601_INSTR |
 			PPC_FEATURE_UNIFIED_CACHE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
-		.cpu_setup		= __setup_cpu_601
 	},
 	{	/* 603 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00030000,
 		.cpu_name		= "603",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_603,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -112,9 +76,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00060000,
 		.cpu_name		= "603e",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_603,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -124,9 +86,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00070000,
 		.cpu_name		= "603ev",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_603,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -136,9 +96,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00040000,
 		.cpu_name		= "604",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_604,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -149,9 +107,7 @@
 		.pvr_mask		= 0xfffff000,
 		.pvr_value		= 0x00090000,
 		.cpu_name		= "604e",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_604,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -162,9 +118,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00090000,
 		.cpu_name		= "604r",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_604,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -175,9 +129,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x000a0000,
 		.cpu_name		= "604ev",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_604,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -188,10 +140,7 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x00084202,
 		.cpu_name		= "740/750",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_740_NOTAU,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -202,10 +151,7 @@
 		.pvr_mask		= 0xfffffff0,
 		.pvr_value		= 0x00080100,
 		.cpu_name		= "750CX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_750,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -216,10 +162,7 @@
 		.pvr_mask		= 0xfffffff0,
 		.pvr_value		= 0x00082200,
 		.cpu_name		= "750CX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_750,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -230,10 +173,7 @@
 		.pvr_mask		= 0xfffffff0,
 		.pvr_value		= 0x00082210,
 		.cpu_name		= "750CXe",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_750,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -244,10 +184,7 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x00083214,
 		.cpu_name		= "750CXe",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_750,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -258,10 +195,7 @@
 		.pvr_mask		= 0xfffff000,
 		.pvr_value		= 0x00083000,
 		.cpu_name		= "745/755",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_750,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -272,11 +206,7 @@
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x70000100,
 		.cpu_name		= "750FX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-			CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
+		.cpu_features		= CPU_FTRS_750FX1,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -287,11 +217,7 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x70000200,
 		.cpu_name		= "750FX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-			CPU_FTR_NO_DPM,
+		.cpu_features		= CPU_FTRS_750FX2,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -302,11 +228,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x70000000,
 		.cpu_name		= "750FX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
-			CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+		.cpu_features		= CPU_FTRS_750FX,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -317,11 +239,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x70020000,
 		.cpu_name		= "750GX",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-			CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
-			CPU_FTR_HAS_HIGH_BATS,
+		.cpu_features		= CPU_FTRS_750GX,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -332,10 +250,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00080000,
 		.cpu_name		= "740/750",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+		.cpu_features		= CPU_FTRS_740,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -346,11 +261,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x000c1101,
 		.cpu_name		= "7400 (1.1)",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7400_NOTAU,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -360,12 +272,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x000c0000,
 		.cpu_name		= "7400",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_MAYBE_CAN_NAP,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7400,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -375,12 +283,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x800c0000,
 		.cpu_name		= "7410",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_MAYBE_CAN_NAP,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7400,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 4,
@@ -390,12 +294,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x80000200,
 		.cpu_name		= "7450",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7450_20,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -405,14 +305,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x80000201,
 		.cpu_name		= "7450",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
-			CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7450_21,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -422,13 +316,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80000000,
 		.cpu_name		= "7450",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7450_23,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -438,12 +327,8 @@
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x80010100,
 		.cpu_name		= "7455",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7455_1,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -453,14 +338,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x80010200,
 		.cpu_name		= "7455",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
-			CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7455_20,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -470,14 +349,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80010000,
 		.cpu_name		= "7455",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-			CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7455,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -487,14 +360,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x80020100,
 		.cpu_name		= "7447/7457",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-			CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7447_10,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -504,14 +371,8 @@
 		.pvr_mask		= 0xffffffff,
 		.pvr_value		= 0x80020101,
 		.cpu_name		= "7447/7457",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-			CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7447_10,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -521,14 +382,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80020000,
 		.cpu_name		= "7447/7457",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
-			CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
-			CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
-			CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7447,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -538,13 +393,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80030000,
 		.cpu_name		= "7447A",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
-			CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7447A,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -554,13 +404,8 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80040000,
 		.cpu_name		= "7448",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
-			CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_7447A,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 		.num_pmcs		= 6,
@@ -570,9 +415,7 @@
 		.pvr_mask		= 0x7fff0000,
 		.pvr_value		= 0x00810000,
 		.cpu_name		= "82xx",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_82XX,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -582,9 +425,7 @@
 		.pvr_mask		= 0x7fff0000,
 		.pvr_value		= 0x00820000,
 		.cpu_name		= "G2_LE",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+		.cpu_features		= CPU_FTRS_G2_LE,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -594,9 +435,7 @@
 		.pvr_mask		= 0x7fff0000,
 		.pvr_value		= 0x00830000,
 		.cpu_name		= "e300",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
-			CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+		.cpu_features		= CPU_FTRS_E300,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -606,13 +445,10 @@
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
 		.cpu_name		= "(generic PPC)",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_CLASSIC32,
 		.cpu_user_features	= COMMON_PPC,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
-		.cpu_setup		= __setup_cpu_generic
 	},
 #endif /* CLASSIC_PPC */
 #ifdef CONFIG_PPC64BRIDGE
@@ -620,94 +456,50 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00400000,
 		.cpu_name		= "Power3 (630)",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_POWER3_32,
 		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_power3
 	},
 	{	/* Power3+ */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00410000,
 		.cpu_name		= "Power3 (630+)",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_POWER3_32,
 		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_power3
 	},
 	{	/* I-star */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00360000,
 		.cpu_name		= "I-star",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_POWER3_32,
 		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_power3
 	},
 	{	/* S-star */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00370000,
 		.cpu_name		= "S-star",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
+		.cpu_features		= CPU_FTRS_POWER3_32,
 		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_power3
 	},
 #endif /* CONFIG_PPC64BRIDGE */
 #ifdef CONFIG_POWER4
-	{	/* Power4 */
-		.pvr_mask		= 0xffff0000,
-		.pvr_value		= 0x00350000,
-		.cpu_name		= "Power4",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64,
-		.icache_bsize		= 128,
-		.dcache_bsize		= 128,
-		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_power4
-	},
-	{	/* PPC970 */
-		.pvr_mask		= 0xffff0000,
-		.pvr_value		= 0x00390000,
-		.cpu_name		= "PPC970",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64 |
-			PPC_FEATURE_ALTIVEC_COMP,
-		.icache_bsize		= 128,
-		.dcache_bsize		= 128,
-		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_ppc970
-	},
 	{	/* PPC970FX */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003c0000,
 		.cpu_name		= "PPC970FX",
-		.cpu_features		= CPU_FTR_COMMON |
-			CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
-			CPU_FTR_HPTE_TABLE |
-			CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
-		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64 |
-			PPC_FEATURE_ALTIVEC_COMP,
+		.cpu_features		= CPU_FTRS_970_32,
+		.cpu_user_features	= COMMON_PPC | PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -721,8 +513,7 @@
 		.cpu_name		= "8xx",
 		/* CPU_FTR_MAYBE_CAN_DOZE is possible,
 		 * if the 8xx code is there.... */
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_8XX,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 16,
 		.dcache_bsize		= 16,
@@ -733,8 +524,7 @@
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x00200200,
 		.cpu_name		= "403GC",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 16,
 		.dcache_bsize		= 16,
@@ -743,8 +533,7 @@
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x00201400,
 		.cpu_name		= "403GCX",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 16,
 		.dcache_bsize		= 16,
@@ -753,8 +542,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00200000,
 		.cpu_name		= "403G ??",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 16,
 		.dcache_bsize		= 16,
@@ -763,8 +551,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x40110000,
 		.cpu_name		= "405GP",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -774,8 +561,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x40130000,
 		.cpu_name		= "STB03xxx",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -785,8 +571,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x41810000,
 		.cpu_name		= "STB04xxx",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -796,8 +581,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x41610000,
 		.cpu_name		= "NP405L",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -807,8 +591,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x40B10000,
 		.cpu_name		= "NP4GS3",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -818,8 +601,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x41410000,
 		.cpu_name		= "NP405H",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -829,8 +611,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x50910000,
 		.cpu_name		= "405GPr",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -840,8 +621,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x51510000,
 		.cpu_name		= "STBx25xx",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -851,8 +631,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x41F10000,
 		.cpu_name		= "405LP",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -861,8 +640,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x20010000,
 		.cpu_name		= "Virtex-II Pro",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -872,8 +650,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x51210000,
 		.cpu_name		= "405EP",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_40X,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
 		.icache_bsize		= 32,
@@ -886,8 +663,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x40000850,
 		.cpu_name		= "440EP Rev. A",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= COMMON_PPC, /* 440EP has an FPU */
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -896,8 +672,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x400008d3,
 		.cpu_name		= "440EP Rev. B",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= COMMON_PPC, /* 440EP has an FPU */
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -906,8 +681,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x40000440,
 		.cpu_name		= "440GP Rev. B",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -916,8 +690,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x40000481,
 		.cpu_name		= "440GP Rev. C",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -926,8 +699,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x50000850,
 		.cpu_name		= "440GX Rev. A",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -936,8 +708,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x50000851,
 		.cpu_name		= "440GX Rev. B",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -946,8 +717,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x50000892,
 		.cpu_name		= "440GX Rev. C",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -956,8 +726,7 @@
 		.pvr_mask		= 0xf0000fff,
 		.pvr_value		= 0x50000894,
 		.cpu_name		= "440GX Rev. F",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -966,8 +735,7 @@
 		.pvr_mask		= 0xff000fff,
 		.pvr_value		= 0x53000891,
 		.cpu_name		= "440SP Rev. A",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_44X,
 		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
@@ -979,7 +747,7 @@
 		.pvr_value		= 0x81000000,
 		.cpu_name		= "e200z5",
 		/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-		.cpu_features		= CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_E200,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
 			PPC_FEATURE_UNIFIED_CACHE,
@@ -990,7 +758,7 @@
 		.pvr_value		= 0x81100000,
 		.cpu_name		= "e200z6",
 		/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-		.cpu_features		= CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_E200,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
 			PPC_FEATURE_HAS_EFP_SINGLE |
@@ -1002,8 +770,7 @@
 		.pvr_value		= 0x80200000,
 		.cpu_name		= "e500",
 		/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB,
+		.cpu_features		= CPU_FTRS_E500,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
 			PPC_FEATURE_HAS_EFP_SINGLE,
@@ -1016,8 +783,7 @@
 		.pvr_value		= 0x80210000,
 		.cpu_name		= "e500v2",
 		/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
+		.cpu_features		= CPU_FTRS_E500_2,
 		.cpu_user_features	= PPC_FEATURE_32 |
 			PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
 			PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
@@ -1031,7 +797,7 @@
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
 		.cpu_name		= "(generic PPC)",
-		.cpu_features		= CPU_FTR_COMMON,
+		.cpu_features		= CPU_FTRS_GENERIC_32,
 		.cpu_user_features	= PPC_FEATURE_32,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1960fb8..8cdac73 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -804,7 +804,7 @@
 	beq	1f
 	add	r4,r4,r6
 	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
-	SAVE_32VR(0,r10,r4)
+	SAVE_32VRS(0,r10,r4)
 	mfvscr	vr0
 	li	r10,THREAD_VSCR
 	stvx	vr0,r10,r4
@@ -824,7 +824,7 @@
 	stw	r4,THREAD_USED_VR(r5)
 	lvx	vr0,r10,r5
 	mtvscr	vr0
-	REST_32VR(0,r10,r5)
+	REST_32VRS(0,r10,r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	sub	r4,r4,r6
@@ -870,7 +870,7 @@
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpwi	0,r5,0
-	SAVE_32VR(0, r4, r3)
+	SAVE_32VRS(0, r4, r3)
 	mfvscr	vr0
 	li	r4,THREAD_VSCR
 	stvx	vr0,r4,r3
@@ -1059,7 +1059,6 @@
 
 	lis	r3,-KERNELBASE@h
 	mr	r4,r24
-	bl	identify_cpu
 	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
 #ifdef CONFIG_6xx
 	lis	r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@
  * Those generic dummy functions are kept for CPUs not
  * included in CONFIG_6xx
  */
-_GLOBAL(__setup_cpu_power3)
-	blr
-_GLOBAL(__setup_cpu_generic)
-	blr
-
 #if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
 _GLOBAL(__save_cpu_setup)
 	blr
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index 8e52e84..eba5a5f 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -853,7 +853,7 @@
 	cmpi	0,r4,0
 	beq	1f
 	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
-	SAVE_32EVR(0,r10,r4)
+	SAVE_32EVRS(0,r10,r4)
    	evxor	evr10, evr10, evr10	/* clear out evr10 */
 	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
 	li	r5,THREAD_ACC
@@ -873,7 +873,7 @@
 	stw	r4,THREAD_USED_SPE(r5)
 	evlddx	evr4,r10,r5
 	evmra	evr4,evr4
-	REST_32EVR(0,r10,r5)
+	REST_32EVRS(0,r10,r5)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	stw	r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
-	SAVE_32EVR(0, r4, r3)
+	SAVE_32EVRS(0, r4, r3)
    	evxor	evr6, evr6, evr6	/* clear out evr6 */
 	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
 	li	r4,THREAD_ACC
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 90d917d..2b9a162 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -125,9 +125,8 @@
 1:
 	addis	r6,r3,cur_cpu_spec@ha
 	addi	r6,r6,cur_cpu_spec@l
-	slwi	r4,r4,2
 	sub	r8,r8,r3
-	stwx	r8,r4,r6
+	stw	r8,0(r6)
 	blr
 
 /*
@@ -186,19 +185,18 @@
  *
  * Setup function is called with:
  *   r3 = data offset
- *   r4 = CPU number
- *   r5 = ptr to CPU spec (relocated)
+ *   r4 = ptr to CPU spec (relocated)
  */
 _GLOBAL(call_setup_cpu)
-	addis	r5,r3,cur_cpu_spec@ha
-	addi	r5,r5,cur_cpu_spec@l
-	slwi	r4,r24,2
-	lwzx	r5,r4,r5
+	addis	r4,r3,cur_cpu_spec@ha
+	addi	r4,r4,cur_cpu_spec@l
+	lwz	r4,0(r4)
+	add	r4,r4,r3
+	lwz	r5,CPU_SPEC_SETUP(r4)
+	cmpi	0,r5,0
 	add	r5,r5,r3
-	lwz	r6,CPU_SPEC_SETUP(r5)
-	add	r6,r6,r3
-	mtctr	r6
-	mr	r4,r24
+	beqlr
+	mtctr	r5
 	bctr
 
 #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
@@ -273,134 +271,6 @@
 
 #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
 
-/* void local_save_flags_ptr(unsigned long *flags) */
-_GLOBAL(local_save_flags_ptr)
-	mfmsr	r4
-	stw	r4,0(r3)
-	blr
-	/*
-	 * Need these nops here for taking over save/restore to
-	 * handle lost intrs
-	 * -- Cort
-	 */
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-_GLOBAL(local_save_flags_ptr_end)
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-/*
- * Just set/clear the MSR_EE bit through restore/flags but do not
- * change anything else.  This is needed by the RT system and makes
- * sense anyway.
- *    -- Cort
- */
-	mfmsr 	r4
-	/* Copy all except the MSR_EE bit from r4 (current MSR value)
-	   to r3.  This is the sort of thing the rlwimi instruction is
-	   designed for.  -- paulus. */
-	rlwimi	r3,r4,0,17,15
-	 /* Check if things are setup the way we want _already_. */
-	cmpw	0,r3,r4
-	beqlr
-1:	SYNC
-	mtmsr	r3
-	SYNC
-	blr
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-_GLOBAL(local_irq_restore_end)
-
-_GLOBAL(local_irq_disable)
-	mfmsr	r0		/* Get current interrupt state */
-	rlwinm	r3,r0,16+1,32-1,31	/* Extract old value of 'EE' */
-	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
-	SYNC			/* Some chip revs have problems here... */
-	mtmsr	r0		/* Update machine state */
-	blr			/* Done */
-	/*
-	 * Need these nops here for taking over save/restore to
-	 * handle lost intrs
-	 * -- Cort
-	 */
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-_GLOBAL(local_irq_disable_end)
-
-_GLOBAL(local_irq_enable)
-	mfmsr	r3		/* Get current state */
-	ori	r3,r3,MSR_EE	/* Turn on 'EE' bit */
-	SYNC			/* Some chip revs have problems here... */
-	mtmsr	r3		/* Update machine state */
-	blr
-	/*
-	 * Need these nops here for taking over save/restore to
-	 * handle lost intrs
-	 * -- Cort
-	 */
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-_GLOBAL(local_irq_enable_end)
-
 /*
  * complement mask on the msr then "or" some values on.
  *     _nmask_and_or_msr(nmask, value_to_or)
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 854e45b..2d3c557 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -644,7 +644,7 @@
 /*
  * Functions below are used on OpenFirmware machines.
  */
-static void __openfirmware
+static void
 make_one_node_map(struct device_node* node, u8 pci_bus)
 {
 	int *bus_range;
@@ -678,7 +678,7 @@
 	}
 }
 	
-void __openfirmware
+void
 pcibios_make_OF_bus_map(void)
 {
 	int i;
@@ -720,7 +720,7 @@
 
 typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
 
-static struct device_node* __openfirmware
+static struct device_node*
 scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
 {
 	struct device_node* sub_node;
@@ -761,7 +761,7 @@
 	return 0;
 }
 
-static struct device_node* __openfirmware
+static struct device_node*
 scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
 {
 	u8 filter_data[2] = {bus, dev_fn};
@@ -842,7 +842,7 @@
 	return NULL;
 }
 
-static int __openfirmware
+static int
 find_OF_pci_device_filter(struct device_node* node, void* data)
 {
 	return ((void *)node == data);
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
index 22df9a5..c9a38dd 100644
--- a/arch/ppc/kernel/perfmon.c
+++ b/arch/ppc/kernel/perfmon.c
@@ -64,7 +64,7 @@
 
 /* Grab the interrupt, if it's free.
- * Returns 0 on success, -1 if the interrupt is taken already */
+ * Returns 0 on success, -EBUSY if the interrupt is taken already */
-int request_perfmon_irq(void (*handler)(struct pt_regs *))
+int reserve_pmc_hardware(void (*handler)(struct pt_regs *))
 {
 	int err = 0;
 
@@ -74,7 +74,7 @@
 		perf_irq = handler;
 	else {
 		pr_info("perfmon irq already handled by %p\n", perf_irq);
-		err = -1;
+		err = -EBUSY;
 	}
 
 	spin_unlock(&perfmon_lock);
@@ -82,7 +82,7 @@
 	return err;
 }
 
-void free_perfmon_irq(void)
+void release_pmc_hardware(void)
 {
 	spin_lock(&perfmon_lock);
 
@@ -92,5 +92,5 @@
 }
 
 EXPORT_SYMBOL(perf_irq);
-EXPORT_SYMBOL(request_perfmon_irq);
-EXPORT_SYMBOL(free_perfmon_irq);
+EXPORT_SYMBOL(reserve_pmc_hardware);
+EXPORT_SYMBOL(release_pmc_hardware);
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 88f6bb7..1545621 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -272,16 +272,6 @@
 #endif
 
 EXPORT_SYMBOL(__delay);
-#ifndef INLINE_IRQS
-EXPORT_SYMBOL(local_irq_enable);
-EXPORT_SYMBOL(local_irq_enable_end);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_disable_end);
-EXPORT_SYMBOL(local_save_flags_ptr);
-EXPORT_SYMBOL(local_save_flags_ptr_end);
-EXPORT_SYMBOL(local_irq_restore);
-EXPORT_SYMBOL(local_irq_restore_end);
-#endif
 EXPORT_SYMBOL(timer_interrupt);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(tb_ticks_per_jiffy);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 545cfd0..62022ea 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -71,7 +71,8 @@
 unsigned long boot_mem_size;
 
 unsigned long ISA_DMA_THRESHOLD;
-unsigned long DMA_MODE_READ, DMA_MODE_WRITE;
+unsigned int DMA_MODE_READ;
+unsigned int DMA_MODE_WRITE;
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
 int _machine = 0;
@@ -82,6 +83,8 @@
 		unsigned long r5, unsigned long r6, unsigned long r7);
 extern void chrp_init(unsigned long r3, unsigned long r4,
 		unsigned long r5, unsigned long r6, unsigned long r7);
+
+dev_t boot_dev;
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -185,18 +188,20 @@
 	seq_printf(m, "processor\t: %d\n", i);
 	seq_printf(m, "cpu\t\t: ");
 
-	if (cur_cpu_spec[i]->pvr_mask)
-		seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name);
+	if (cur_cpu_spec->pvr_mask)
+		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
 	else
 		seq_printf(m, "unknown (%08x)", pvr);
 #ifdef CONFIG_ALTIVEC
-	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC)
+	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
 		seq_printf(m, ", altivec supported");
 #endif
 	seq_printf(m, "\n");
 
 #ifdef CONFIG_TAU
-	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
 #ifdef CONFIG_TAU_AVERAGE
 		/* more straightforward, but potentially misleading */
 		seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +342,7 @@
  * Assume here that all clock rates are the same in a
  * smp system.  -- Cort
  */
-int __openfirmware
+int
 of_show_percpuinfo(struct seq_file *m, int i)
 {
 	struct device_node *cpu_node;
@@ -404,11 +407,13 @@
 			_machine = _MACH_prep;
 	}
 
+#ifdef CONFIG_PPC_PREP
 	/* not much more to do here, if prep */
 	if (_machine == _MACH_prep) {
 		prep_init(r3, r4, r5, r6, r7);
 		return;
 	}
+#endif
 
 	/* prom_init has already been called from __start */
 	if (boot_infos)
@@ -479,12 +484,16 @@
 #endif /* CONFIG_ADB */
 
 	switch (_machine) {
+#ifdef CONFIG_PPC_PMAC
 	case _MACH_Pmac:
 		pmac_init(r3, r4, r5, r6, r7);
 		break;
+#endif
+#ifdef CONFIG_PPC_CHRP
 	case _MACH_chrp:
 		chrp_init(r3, r4, r5, r6, r7);
 		break;
+#endif
 	}
 }
 
@@ -745,12 +754,12 @@
 	 * for a possibly more accurate value.
 	 */
 	if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
-		dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
-		icache_bsize = cur_cpu_spec[0]->icache_bsize;
+		dcache_bsize = cur_cpu_spec->dcache_bsize;
+		icache_bsize = cur_cpu_spec->icache_bsize;
 		ucache_bsize = 0;
 	} else
 		ucache_bsize = dcache_bsize = icache_bsize
-			= cur_cpu_spec[0]->dcache_bsize;
+			= cur_cpu_spec->dcache_bsize;
 
 	/* reboot on panic */
 	panic_timeout = 180;
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 961ede8..82e4d70 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -575,7 +575,8 @@
 #define module_find_bug(x)	NULL
 #endif
 
-static struct bug_entry *find_bug(unsigned long bugaddr)
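+/* no longer static: find_bug() is now needed outside this file */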
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
 	struct bug_entry *bug;
 
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 17d2db7..09c6525 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -149,32 +149,9 @@
 
   . = ALIGN(4096);
   _sextratext = .;
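+  /* The .pmac/.prep/.chrp/.openfirmware sections are gone: the
+     __pmac/__chrp/__prep/__openfirmware annotations have been
+     removed tree-wide, so that code now lives in plain .text/.data. */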
-  __pmac_begin = .;
-  .pmac.text : { *(.pmac.text) }
-  .pmac.data : { *(.pmac.data) }
-  . = ALIGN(4096);
-  __pmac_end = .;
-
-  . = ALIGN(4096);
-  __prep_begin = .;
-  .prep.text : { *(.prep.text) }
-  .prep.data : { *(.prep.data) }
-  . = ALIGN(4096);
-  __prep_end = .;
-
-  . = ALIGN(4096);
-  __chrp_begin = .;
-  .chrp.text : { *(.chrp.text) }
-  .chrp.data : { *(.chrp.data) }
-  . = ALIGN(4096);
-  __chrp_end = .;
-
-  . = ALIGN(4096);
-  __openfirmware_begin = .;
-  .openfirmware.text : { *(.openfirmware.text) }
-  .openfirmware.data : { *(.openfirmware.data) }
-  . = ALIGN(4096);
-  __openfirmware_end = .;
   _eextratext = .;
 
   __bss_start = .;
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index f421a4b..5e9ef23 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -74,10 +74,6 @@
 extern char _end[];
 extern char etext[], _stext[];
 extern char __init_begin, __init_end;
-extern char __prep_begin, __prep_end;
-extern char __chrp_begin, __chrp_end;
-extern char __pmac_begin, __pmac_end;
-extern char __openfirmware_begin, __openfirmware_end;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -167,14 +163,6 @@
 
 	printk ("Freeing unused kernel memory:");
 	FREESEC(init);
-	if (_machine != _MACH_Pmac)
-		FREESEC(pmac);
-	if (_machine != _MACH_chrp)
-		FREESEC(chrp);
-	if (_machine != _MACH_prep)
-		FREESEC(prep);
-	if (!have_of)
-		FREESEC(openfirmware);
  	printk("\n");
 	ppc_md.progress = NULL;
 #undef FREESEC
diff --git a/arch/ppc/oprofile/common.c b/arch/ppc/oprofile/common.c
deleted file mode 100644
index 3169c67..0000000
--- a/arch/ppc/oprofile/common.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * PPC 32 oprofile support
- * Based on PPC64 oprofile support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (C) Freescale Semiconductor, Inc 2004
- *
- * Author: Andy Fleming
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/perfmon.h>
-#include <asm/cputable.h>
-
-#include "op_impl.h"
-
-static struct op_ppc32_model *model;
-
-static struct op_counter_config ctr[OP_MAX_COUNTER];
-static struct op_system_config sys;
-
-static void op_handle_interrupt(struct pt_regs *regs)
-{
-	model->handle_interrupt(regs, ctr);
-}
-
-static int op_ppc32_setup(void)
-{
-	/* Install our interrupt handler into the existing hook.  */
-	if(request_perfmon_irq(&op_handle_interrupt))
-		return -EBUSY;
-
-	mb();
-
-	/* Pre-compute the values to stuff in the hardware registers.  */
-	model->reg_setup(ctr, &sys, model->num_counters);
-
-#if 0
-	/* FIXME: Make multi-cpu work */
-	/* Configure the registers on all cpus.  */
-	on_each_cpu(model->reg_setup, NULL, 0, 1);
-#endif
-
-	return 0;
-}
-
-static void op_ppc32_shutdown(void)
-{
-	mb();
-
-	/* Remove our interrupt handler. We may be removing this module. */
-	free_perfmon_irq();
-}
-
-static void op_ppc32_cpu_start(void *dummy)
-{
-	model->start(ctr);
-}
-
-static int op_ppc32_start(void)
-{
-	on_each_cpu(op_ppc32_cpu_start, NULL, 0, 1);
-	return 0;
-}
-
-static inline void op_ppc32_cpu_stop(void *dummy)
-{
-	model->stop();
-}
-
-static void op_ppc32_stop(void)
-{
-	on_each_cpu(op_ppc32_cpu_stop, NULL, 0, 1);
-}
-
-static int op_ppc32_create_files(struct super_block *sb, struct dentry *root)
-{
-	int i;
-
-	for (i = 0; i < model->num_counters; ++i) {
-		struct dentry *dir;
-		char buf[3];
-
-		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-		/* FIXME: Not sure if this is used */
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-	}
-
-	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
-	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
-
-	/* Default to tracing both kernel and user */
-	sys.enable_kernel = 1;
-	sys.enable_user = 1;
-
-	return 0;
-}
-
-static struct oprofile_operations oprof_ppc32_ops = {
-	.create_files	= op_ppc32_create_files,
-	.setup		= op_ppc32_setup,
-	.shutdown	= op_ppc32_shutdown,
-	.start		= op_ppc32_start,
-	.stop		= op_ppc32_stop,
-	.cpu_type	= NULL		/* To be filled in below. */
-};
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-	char *name;
-	int cpu_id = smp_processor_id();
-
-#ifdef CONFIG_FSL_BOOKE
-	model = &op_model_fsl_booke;
-#else
-	return -ENODEV;
-#endif
-
-	name = kmalloc(32, GFP_KERNEL);
-
-	if (NULL == name)
-		return -ENOMEM;
-
-	sprintf(name, "ppc/%s", cur_cpu_spec[cpu_id]->cpu_name);
-
-	oprof_ppc32_ops.cpu_type = name;
-
-	model->num_counters = cur_cpu_spec[cpu_id]->num_pmcs;
-
-	*ops = oprof_ppc32_ops;
-
-	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-	       oprof_ppc32_ops.cpu_type);
-
-	return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-	kfree(oprof_ppc32_ops.cpu_type);
-	oprof_ppc32_ops.cpu_type = NULL;
-}
diff --git a/arch/ppc/oprofile/op_impl.h b/arch/ppc/oprofile/op_impl.h
deleted file mode 100644
index bc336dc..0000000
--- a/arch/ppc/oprofile/op_impl.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs.  */
-struct op_counter_config {
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long count;
-	unsigned long kernel;
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs.  */
-struct op_system_config {
-	unsigned long enable_kernel;
-	unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_ppc32_model {
-	void (*reg_setup) (struct op_counter_config *,
-			   struct op_system_config *,
-			   int num_counters);
-	void (*start) (struct op_counter_config *);
-	void (*stop) (void);
-	void (*handle_interrupt) (struct pt_regs *,
-				  struct op_counter_config *);
-	int num_counters;
-};
-
-#endif /* OP_IMPL_H */
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index d6b2b19..9decb72 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -91,7 +91,7 @@
 	 * on Rev. C silicon then errata forces us to
 	 * use the internal clock.
 	 */
-	if (strcmp(cur_cpu_spec[0]->cpu_name, "440GP Rev. B") == 0)
+	if (strcmp(cur_cpu_spec->cpu_name, "440GP Rev. B") == 0)
 		freq = EBONY_440GP_RB_SYSCLK;
 	else
 		freq = EBONY_440GP_RC_SYSCLK;
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.h b/arch/ppc/platforms/83xx/mpc834x_sys.h
index 1584cd7..58e44c0 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.h
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.h
@@ -19,7 +19,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <syslib/ppc83xx_setup.h>
 #include <asm/ppcboot.h>
 
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
index 3875e83..84acf6e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
@@ -19,7 +19,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <asm/ppcboot.h>
 
 #define BOARD_CCSRBAR		((uint)0xe0000000)
diff --git a/arch/ppc/platforms/85xx/stx_gp3.h b/arch/ppc/platforms/85xx/stx_gp3.h
index 7bcc6c3..95fdf4b 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.h
+++ b/arch/ppc/platforms/85xx/stx_gp3.h
@@ -21,7 +21,6 @@
 
 #include <linux/config.h>
 #include <linux/init.h>
-#include <linux/seq_file.h>
 #include <asm/ppcboot.h>
 
 #define BOARD_CCSRBAR		((uint)0xe0000000)
diff --git a/arch/ppc/platforms/chrp_pci.c b/arch/ppc/platforms/chrp_pci.c
index 7d3fbb5..f12192c 100644
--- a/arch/ppc/platforms/chrp_pci.c
+++ b/arch/ppc/platforms/chrp_pci.c
@@ -29,7 +29,7 @@
  * limit the bus number to 3 bits
  */
 
-int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
+int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
 			   int len, u32 *val)
 {
 	volatile void __iomem *cfg_data;
@@ -56,7 +56,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-int __chrp gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
+int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
 			    int len, u32 val)
 {
 	volatile void __iomem *cfg_data;
@@ -92,7 +92,7 @@
 /*
  * Access functions for PCI config space using RTAS calls.
  */
-int __chrp
+int
 rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		 int len, u32 *val)
 {
@@ -108,7 +108,7 @@
 	return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
 }
 
-int __chrp
+int
 rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		  int len, u32 val)
 {
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index 57f29ab..47b154c 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -105,7 +105,7 @@
 	"Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
 };
 
-int __chrp
+int
 chrp_show_cpuinfo(struct seq_file *m)
 {
 	int i, sdramen;
@@ -303,7 +303,7 @@
 	pci_create_OF_bus_map();
 }
 
-void __chrp
+void
 chrp_event_scan(void)
 {
 	unsigned char log[1024];
@@ -314,7 +314,7 @@
 	ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
 }
 
-void __chrp
+void
 chrp_restart(char *cmd)
 {
 	printk("RTAS system-reboot returned %d\n",
@@ -322,7 +322,7 @@
 	for (;;);
 }
 
-void __chrp
+void
 chrp_power_off(void)
 {
 	/* allow power on only with power button press */
@@ -331,13 +331,13 @@
 	for (;;);
 }
 
-void __chrp
+void
 chrp_halt(void)
 {
 	chrp_power_off();
 }
 
-u_int __chrp
+u_int
 chrp_irq_canonicalize(u_int irq)
 {
 	if (irq == 2)
@@ -572,7 +572,7 @@
 	if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
 }
 
-void __chrp
+void
 rtas_display_progress(char *s, unsigned short hex)
 {
 	int width;
@@ -599,7 +599,7 @@
 	call_rtas( "display-character", 1, 1, NULL, ' ' );
 }
 
-void __chrp
+void
 rtas_indicator_progress(char *s, unsigned short hex)
 {
 	call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
diff --git a/arch/ppc/platforms/chrp_smp.c b/arch/ppc/platforms/chrp_smp.c
index 0ea1f7d..dc62e32 100644
--- a/arch/ppc/platforms/chrp_smp.c
+++ b/arch/ppc/platforms/chrp_smp.c
@@ -88,7 +88,7 @@
 }
 
 /* CHRP with openpic */
-struct smp_ops_t chrp_smp_ops __chrpdata = {
+struct smp_ops_t chrp_smp_ops = {
 	.message_pass = smp_openpic_message_pass,
 	.probe = smp_chrp_probe,
 	.kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c
index 6037ce7..29d074c 100644
--- a/arch/ppc/platforms/chrp_time.c
+++ b/arch/ppc/platforms/chrp_time.c
@@ -52,7 +52,7 @@
 	return 0;
 }
 
-int __chrp chrp_cmos_clock_read(int addr)
+int chrp_cmos_clock_read(int addr)
 {
 	if (nvram_as1 != 0)
 		outb(addr>>8, nvram_as1);
@@ -60,7 +60,7 @@
 	return (inb(nvram_data));
 }
 
-void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
+void chrp_cmos_clock_write(unsigned long val, int addr)
 {
 	if (nvram_as1 != 0)
 		outb(addr>>8, nvram_as1);
@@ -72,7 +72,7 @@
 /*
  * Set the hardware clock. -- Cort
  */
-int __chrp chrp_set_rtc_time(unsigned long nowtime)
+int chrp_set_rtc_time(unsigned long nowtime)
 {
 	unsigned char save_control, save_freq_select;
 	struct rtc_time tm;
@@ -118,7 +118,7 @@
 	return 0;
 }
 
-unsigned long __chrp chrp_get_rtc_time(void)
+unsigned long chrp_get_rtc_time(void)
 {
 	unsigned int year, mon, day, hour, min, sec;
 	int uip, i;
diff --git a/arch/ppc/platforms/pmac_backlight.c b/arch/ppc/platforms/pmac_backlight.c
index ed2b1ce..8be2f7d 100644
--- a/arch/ppc/platforms/pmac_backlight.c
+++ b/arch/ppc/platforms/pmac_backlight.c
@@ -37,7 +37,7 @@
 static void backlight_callback(void *);
 static DECLARE_WORK(backlight_work, backlight_callback, NULL);
 
-void __pmac register_backlight_controller(struct backlight_controller *ctrler,
+void register_backlight_controller(struct backlight_controller *ctrler,
 					  void *data, char *type)
 {
 	struct device_node* bk_node;
@@ -99,7 +99,7 @@
 }
 EXPORT_SYMBOL(register_backlight_controller);
 
-void __pmac unregister_backlight_controller(struct backlight_controller
+void unregister_backlight_controller(struct backlight_controller
 					    *ctrler, void *data)
 {
 	/* We keep the current backlight level (for now) */
@@ -108,7 +108,7 @@
 }
 EXPORT_SYMBOL(unregister_backlight_controller);
 
-static int __pmac __set_backlight_enable(int enable)
+static int __set_backlight_enable(int enable)
 {
 	int rc;
 
@@ -122,7 +122,7 @@
 	release_console_sem();
 	return rc;
 }
-int __pmac set_backlight_enable(int enable)
+int set_backlight_enable(int enable)
 {
 	if (!backlighter)
 		return -ENODEV;
@@ -133,7 +133,7 @@
 
 EXPORT_SYMBOL(set_backlight_enable);
 
-int __pmac get_backlight_enable(void)
+int get_backlight_enable(void)
 {
 	if (!backlighter)
 		return -ENODEV;
@@ -141,7 +141,7 @@
 }
 EXPORT_SYMBOL(get_backlight_enable);
 
-static int __pmac __set_backlight_level(int level)
+static int __set_backlight_level(int level)
 {
 	int rc = 0;
 
@@ -165,7 +165,7 @@
 	}
 	return rc;
 }
-int __pmac set_backlight_level(int level)
+int set_backlight_level(int level)
 {
 	if (!backlighter)
 		return -ENODEV;
@@ -176,7 +176,7 @@
 
 EXPORT_SYMBOL(set_backlight_level);
 
-int __pmac get_backlight_level(void)
+int get_backlight_level(void)
 {
 	if (!backlighter)
 		return -ENODEV;
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index c060524..ebb8be9 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -136,7 +136,7 @@
 
 /* Switch CPU speed under 750FX CPU control
  */
-static int __pmac cpu_750fx_cpu_speed(int low_speed)
+static int cpu_750fx_cpu_speed(int low_speed)
 {
 	u32 hid2;
 
@@ -172,7 +172,7 @@
 	return 0;
 }
 
-static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
+static unsigned int cpu_750fx_get_cpu_speed(void)
 {
 	if (mfspr(SPRN_HID1) & HID1_PS)
 		return low_freq;
@@ -181,7 +181,7 @@
 }
 
 /* Switch CPU speed using DFS */
-static int __pmac dfs_set_cpu_speed(int low_speed)
+static int dfs_set_cpu_speed(int low_speed)
 {
 	if (low_speed == 0) {
 		/* ramping up, set voltage first */
@@ -205,7 +205,7 @@
 	return 0;
 }
 
-static unsigned int __pmac dfs_get_cpu_speed(void)
+static unsigned int dfs_get_cpu_speed(void)
 {
 	if (mfspr(SPRN_HID1) & HID1_DFS)
 		return low_freq;
@@ -216,7 +216,7 @@
 
 /* Switch CPU speed using slewing GPIOs
  */
-static int __pmac gpios_set_cpu_speed(int low_speed)
+static int gpios_set_cpu_speed(int low_speed)
 {
 	int gpio, timeout = 0;
 
@@ -258,7 +258,7 @@
 
 /* Switch CPU speed under PMU control
  */
-static int __pmac pmu_set_cpu_speed(int low_speed)
+static int pmu_set_cpu_speed(int low_speed)
 {
 	struct adb_request req;
 	unsigned long save_l2cr;
@@ -354,7 +354,7 @@
 	return 0;
 }
 
-static int __pmac do_set_cpu_speed(int speed_mode, int notify)
+static int do_set_cpu_speed(int speed_mode, int notify)
 {
 	struct cpufreq_freqs freqs;
 	unsigned long l3cr;
@@ -391,17 +391,17 @@
 	return 0;
 }
 
-static unsigned int __pmac pmac_cpufreq_get_speed(unsigned int cpu)
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
 {
 	return cur_freq;
 }
 
-static int __pmac pmac_cpufreq_verify(struct cpufreq_policy *policy)
+static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
 {
 	return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
 }
 
-static int __pmac pmac_cpufreq_target(	struct cpufreq_policy *policy,
+static int pmac_cpufreq_target(	struct cpufreq_policy *policy,
 					unsigned int target_freq,
 					unsigned int relation)
 {
@@ -414,13 +414,13 @@
 	return do_set_cpu_speed(newstate, 1);
 }
 
-unsigned int __pmac pmac_get_one_cpufreq(int i)
+unsigned int pmac_get_one_cpufreq(int i)
 {
 	/* Supports only one CPU for now */
 	return (i == 0) ? cur_freq : 0;
 }
 
-static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	if (policy->cpu != 0)
 		return -ENODEV;
@@ -433,7 +433,7 @@
 	return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
 }
 
-static u32 __pmac read_gpio(struct device_node *np)
+static u32 read_gpio(struct device_node *np)
 {
 	u32 *reg = (u32 *)get_property(np, "reg", NULL);
 	u32 offset;
@@ -452,7 +452,7 @@
 	return offset;
 }
 
-static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
 {
 	/* Ok, this could be made a bit smarter, but let's be robust for now. We
 	 * always force a speed change to high speed before sleep, to make sure
@@ -468,7 +468,7 @@
 	return 0;
 }
 
-static int __pmac pmac_cpufreq_resume(struct cpufreq_policy *policy)
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
 {
 	/* If we resume, first check if we have a get() function */
 	if (get_speed_proc)
@@ -501,7 +501,7 @@
 };
 
 
-static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
 {
 	struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
 								"voltage-gpio");
@@ -593,7 +593,7 @@
 	return 0;
 }
 
-static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
 {
 	struct device_node *volt_gpio_np;
 
@@ -620,7 +620,7 @@
 	return 0;
 }
 
-static int __pmac pmac_cpufreq_init_750FX(struct device_node *cpunode)
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
 {
 	struct device_node *volt_gpio_np;
 	u32 pvr, *value;
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index 867336a..e8cd0a8 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -63,7 +63,7 @@
  * We use a single global lock to protect accesses. Each driver has
  * to take care of its own locking
  */
-static DEFINE_SPINLOCK(feature_lock  __pmacdata);
+static DEFINE_SPINLOCK(feature_lock);
 
 #define LOCK(flags)	spin_lock_irqsave(&feature_lock, flags);
 #define UNLOCK(flags)	spin_unlock_irqrestore(&feature_lock, flags);
@@ -72,9 +72,9 @@
 /*
- * Instance of some macio stuffs
+ * Instances of some macio stuff
  */
-struct macio_chip macio_chips[MAX_MACIO_CHIPS]  __pmacdata;
+struct macio_chip macio_chips[MAX_MACIO_CHIPS];
 
-struct macio_chip* __pmac macio_find(struct device_node* child, int type)
+struct macio_chip* macio_find(struct device_node* child, int type)
 {
 	while(child) {
 		int	i;
@@ -89,7 +89,7 @@
 }
 EXPORT_SYMBOL_GPL(macio_find);
 
-static const char* macio_names[] __pmacdata =
+static const char* macio_names[] =
 {
 	"Unknown",
 	"Grand Central",
@@ -116,10 +116,10 @@
 #define UN_BIS(r,v)	(UN_OUT((r), UN_IN(r) | (v)))
 #define UN_BIC(r,v)	(UN_OUT((r), UN_IN(r) & ~(v)))
 
-static struct device_node* uninorth_node __pmacdata;
-static u32 __iomem * uninorth_base __pmacdata;
-static u32 uninorth_rev __pmacdata;
-static int uninorth_u3 __pmacdata;
+static struct device_node* uninorth_node;
+static u32 __iomem * uninorth_base;
+static u32 uninorth_rev;
+static int uninorth_u3;
 static void __iomem *u3_ht;
 
 /*
@@ -142,13 +142,13 @@
 	struct feature_table_entry* 	features;
 	unsigned long			board_flags;
 };
-static struct pmac_mb_def pmac_mb __pmacdata;
+static struct pmac_mb_def pmac_mb;
 
 /*
  * Here are the chip specific feature functions
  */
 
-static inline int __pmac
+static inline int
 simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value)
 {
 	struct macio_chip*	macio;
@@ -170,7 +170,7 @@
 
 #ifndef CONFIG_POWER4
 
-static long __pmac
+static long
 ohare_htw_scc_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -263,21 +263,21 @@
 	return 0;
 }
 
-static long __pmac
+static long
 ohare_floppy_enable(struct device_node* node, long param, long value)
 {
 	return simple_feature_tweak(node, macio_ohare,
 		OHARE_FCR, OH_FLOPPY_ENABLE, value);
 }
 
-static long __pmac
+static long
 ohare_mesh_enable(struct device_node* node, long param, long value)
 {
 	return simple_feature_tweak(node, macio_ohare,
 		OHARE_FCR, OH_MESH_ENABLE, value);
 }
 
-static long __pmac
+static long
 ohare_ide_enable(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -298,7 +298,7 @@
 	}
 }
 
-static long __pmac
+static long
 ohare_ide_reset(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -313,7 +313,7 @@
 	}
 }
 
-static long __pmac
+static long
 ohare_sleep_state(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio = &macio_chips[0];
@@ -329,7 +329,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 heathrow_modem_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -373,7 +373,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 heathrow_floppy_enable(struct device_node* node, long param, long value)
 {
 	return simple_feature_tweak(node, macio_unknown,
@@ -382,7 +382,7 @@
 		value);
 }
 
-static long __pmac
+static long
 heathrow_mesh_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -411,7 +411,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 heathrow_ide_enable(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -426,7 +426,7 @@
 	}
 }
 
-static long __pmac
+static long
 heathrow_ide_reset(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -441,7 +441,7 @@
 	}
 }
 
-static long __pmac
+static long
 heathrow_bmac_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -470,7 +470,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 heathrow_sound_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -501,16 +501,16 @@
 	return 0;
 }
 
-static u32 save_fcr[6] __pmacdata;
-static u32 save_mbcr __pmacdata;
-static u32 save_gpio_levels[2] __pmacdata;
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT] __pmacdata;
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT] __pmacdata;
-static u32 save_unin_clock_ctl __pmacdata;
-static struct dbdma_regs save_dbdma[13] __pmacdata;
-static struct dbdma_regs save_alt_dbdma[13] __pmacdata;
+static u32 save_fcr[6];
+static u32 save_mbcr;
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
+static struct dbdma_regs save_dbdma[13];
+static struct dbdma_regs save_alt_dbdma[13];
 
-static void __pmac
+static void
 dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
 {
 	int i;
@@ -527,7 +527,7 @@
 	}
 }
 
-static void __pmac
+static void
 dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
 {
 	int i;
@@ -547,7 +547,7 @@
 	}
 }
 
-static void __pmac
+static void
 heathrow_sleep(struct macio_chip* macio, int secondary)
 {
 	if (secondary) {
@@ -580,7 +580,7 @@
 	(void)MACIO_IN32(HEATHROW_FCR);
 }
 
-static void __pmac
+static void
 heathrow_wakeup(struct macio_chip* macio, int secondary)
 {
 	if (secondary) {
@@ -605,7 +605,7 @@
 	}
 }
 
-static long __pmac
+static long
 heathrow_sleep_state(struct device_node* node, long param, long value)
 {
 	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
@@ -622,7 +622,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_scc_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -723,7 +723,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_modem_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -775,7 +775,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 pangea_modem_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -830,7 +830,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_ata100_enable(struct device_node* node, long value)
 {
 	unsigned long flags;
@@ -860,7 +860,7 @@
     	return 0;
 }
 
-static long __pmac
+static long
 core99_ide_enable(struct device_node* node, long param, long value)
 {
 	/* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
@@ -883,7 +883,7 @@
 	}
 }
 
-static long __pmac
+static long
 core99_ide_reset(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -901,7 +901,7 @@
 	}
 }
 
-static long __pmac
+static long
 core99_gmac_enable(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -918,7 +918,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_gmac_phy_reset(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -943,7 +943,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_sound_chip_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -973,7 +973,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_airport_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip*	macio;
@@ -1060,7 +1060,7 @@
 }
 
 #ifdef CONFIG_SMP
-static long __pmac
+static long
 core99_reset_cpu(struct device_node* node, long param, long value)
 {
 	unsigned int reset_io = 0;
@@ -1104,7 +1104,7 @@
 }
 #endif /* CONFIG_SMP */
 
-static long __pmac
+static long
 core99_usb_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio;
@@ -1257,7 +1257,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_firewire_enable(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -1284,7 +1284,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_firewire_cable_power(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -1315,7 +1315,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 intrepid_aack_delay_enable(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -1336,7 +1336,7 @@
 
 #endif /* CONFIG_POWER4 */
 
-static long __pmac
+static long
 core99_read_gpio(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
@@ -1345,7 +1345,7 @@
 }
 
 
-static long __pmac
+static long
 core99_write_gpio(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
@@ -1356,7 +1356,7 @@
 
 #ifdef CONFIG_POWER4
 
-static long __pmac
+static long
 g5_gmac_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
@@ -1380,7 +1380,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 g5_fw_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
@@ -1403,7 +1403,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 g5_mpic_enable(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
@@ -1419,7 +1419,7 @@
 }
 
 #ifdef CONFIG_SMP
-static long __pmac
+static long
 g5_reset_cpu(struct device_node* node, long param, long value)
 {
 	unsigned int reset_io = 0;
@@ -1465,7 +1465,7 @@
  * This takes the second CPU off the bus on dual CPU machines
  * running UP
  */
-void __pmac g5_phy_disable_cpu1(void)
+void g5_phy_disable_cpu1(void)
 {
 	UN_OUT(U3_API_PHY_CONFIG_1, 0);
 }
@@ -1474,7 +1474,7 @@
 
 #ifndef CONFIG_POWER4
 
-static void __pmac
+static void
 keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
 {
 	u32 temp;
@@ -1528,7 +1528,7 @@
 	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
 }
 
-static void __pmac
+static void
 pangea_shutdown(struct macio_chip* macio, int sleep_mode)
 {
 	u32 temp;
@@ -1562,7 +1562,7 @@
 	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
 }
 
-static void __pmac
+static void
 intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
 {
 	u32 temp;
@@ -1591,7 +1591,7 @@
 }
 
 
-void __pmac pmac_tweak_clock_spreading(int enable)
+void pmac_tweak_clock_spreading(int enable)
 {
 	struct macio_chip* macio = &macio_chips[0];
 
@@ -1698,7 +1698,7 @@
 }
 
 
-static int __pmac
+static int
 core99_sleep(void)
 {
 	struct macio_chip* macio;
@@ -1791,7 +1791,7 @@
 	return 0;
 }
 
-static int __pmac
+static int
 core99_wake_up(void)
 {
 	struct macio_chip* macio;
@@ -1854,7 +1854,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 core99_sleep_state(struct device_node* node, long param, long value)
 {
 	/* Param == 1 means to enter the "fake sleep" mode that is
@@ -1884,7 +1884,7 @@
 
 #endif /* CONFIG_POWER4 */
 
-static long __pmac
+static long
 generic_dev_can_wake(struct device_node* node, long param, long value)
 {
 	/* Todo: eventually check we are really dealing with on-board
@@ -1896,7 +1896,7 @@
 	return 0;
 }
 
-static long __pmac
+static long
 generic_get_mb_info(struct device_node* node, long param, long value)
 {
 	switch(param) {
@@ -1919,7 +1919,7 @@
 
 /* Used on any machine
  */
-static struct feature_table_entry any_features[]  __pmacdata = {
+static struct feature_table_entry any_features[] = {
 	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
 	{ PMAC_FTR_DEVICE_CAN_WAKE,	generic_dev_can_wake },
 	{ 0, NULL }
@@ -1931,7 +1931,7 @@
  * 2400,3400 and 3500 series powerbooks. Some older desktops seem
  * to have issues with turning on/off those asic cells
  */
-static struct feature_table_entry ohare_features[]  __pmacdata = {
+static struct feature_table_entry ohare_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
 	{ PMAC_FTR_SWIM3_ENABLE,	ohare_floppy_enable },
 	{ PMAC_FTR_MESH_ENABLE,		ohare_mesh_enable },
@@ -1945,7 +1945,7 @@
  * Separated as some features couldn't be properly tested
  * and the serial port control bits appear to confuse it.
  */
-static struct feature_table_entry heathrow_desktop_features[]  __pmacdata = {
+static struct feature_table_entry heathrow_desktop_features[] = {
 	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
 	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
 	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
@@ -1957,7 +1957,7 @@
 /* Heathrow based laptop, that is the Wallstreet and mainstreet
  * powerbooks.
  */
-static struct feature_table_entry heathrow_laptop_features[]  __pmacdata = {
+static struct feature_table_entry heathrow_laptop_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
 	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
 	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
@@ -1973,7 +1973,7 @@
 /* Paddington based machines
  * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
  */
-static struct feature_table_entry paddington_features[]  __pmacdata = {
+static struct feature_table_entry paddington_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
 	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
 	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
@@ -1991,7 +1991,7 @@
  * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
  * used on iBook2 & iMac "flow power".
  */
-static struct feature_table_entry core99_features[]  __pmacdata = {
+static struct feature_table_entry core99_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
 	{ PMAC_FTR_MODEM_ENABLE,	core99_modem_enable },
 	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
@@ -2014,7 +2014,7 @@
 
 /* RackMac
  */
-static struct feature_table_entry rackmac_features[]  __pmacdata = {
+static struct feature_table_entry rackmac_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
 	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
 	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
@@ -2034,7 +2034,7 @@
 
 /* Pangea features
  */
-static struct feature_table_entry pangea_features[]  __pmacdata = {
+static struct feature_table_entry pangea_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
 	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
 	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
@@ -2054,7 +2054,7 @@
 
 /* Intrepid features
  */
-static struct feature_table_entry intrepid_features[]  __pmacdata = {
+static struct feature_table_entry intrepid_features[] = {
 	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
 	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
 	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
@@ -2077,7 +2077,7 @@
 
 /* G5 features
  */
-static struct feature_table_entry g5_features[]  __pmacdata = {
+static struct feature_table_entry g5_features[] = {
 	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
 	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
 	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
@@ -2091,7 +2091,7 @@
 
 #endif /* CONFIG_POWER4 */
 
-static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
+static struct pmac_mb_def pmac_mb_defs[] = {
 #ifndef CONFIG_POWER4
 	/*
 	 * Desktops
@@ -2352,7 +2352,7 @@
 /*
  * The toplevel feature_call callback
  */
-long __pmac
+long
 pmac_do_feature_call(unsigned int selector, ...)
 {
 	struct device_node* node;
@@ -2935,8 +2935,8 @@
  * Early video resume hook
  */
 
-static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
-static void *pmac_early_vresume_data __pmacdata;
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
 
 void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
 {
@@ -2949,7 +2949,7 @@
 }
 EXPORT_SYMBOL(pmac_set_early_video_resume);
 
-void __pmac pmac_call_early_video_resume(void)
+void pmac_call_early_video_resume(void)
 {
 	if (pmac_early_vresume_proc)
 		pmac_early_vresume_proc(pmac_early_vresume_data);
@@ -2959,11 +2959,11 @@
  * AGP related suspend/resume code
  */
 
-static struct pci_dev *pmac_agp_bridge __pmacdata;
-static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
-static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
 
-void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
+void pmac_register_agp_pm(struct pci_dev *bridge,
 				 int (*suspend)(struct pci_dev *bridge),
 				 int (*resume)(struct pci_dev *bridge))
 {
@@ -2980,7 +2980,7 @@
 }
 EXPORT_SYMBOL(pmac_register_agp_pm);
 
-void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
 {
 	if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
 		return;
@@ -2990,7 +2990,7 @@
 }
 EXPORT_SYMBOL(pmac_suspend_agp_for_card);
 
-void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
+void pmac_resume_agp_for_card(struct pci_dev *dev)
 {
 	if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
 		return;
diff --git a/arch/ppc/platforms/pmac_nvram.c b/arch/ppc/platforms/pmac_nvram.c
index c9de642..8c9b008 100644
--- a/arch/ppc/platforms/pmac_nvram.c
+++ b/arch/ppc/platforms/pmac_nvram.c
@@ -88,17 +88,17 @@
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);
 
-static char *nvram_image __pmacdata;
+static char *nvram_image;
 
 
-static unsigned char __pmac core99_nvram_read_byte(int addr)
+static unsigned char core99_nvram_read_byte(int addr)
 {
 	if (nvram_image == NULL)
 		return 0xff;
 	return nvram_image[addr];
 }
 
-static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
+static void core99_nvram_write_byte(int addr, unsigned char val)
 {
 	if (nvram_image == NULL)
 		return;
@@ -106,18 +106,18 @@
 }
 
 
-static unsigned char __openfirmware direct_nvram_read_byte(int addr)
+static unsigned char direct_nvram_read_byte(int addr)
 {
 	return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
 }
 
-static void __openfirmware direct_nvram_write_byte(int addr, unsigned char val)
+static void direct_nvram_write_byte(int addr, unsigned char val)
 {
 	out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
 }
 
 
-static unsigned char __pmac indirect_nvram_read_byte(int addr)
+static unsigned char indirect_nvram_read_byte(int addr)
 {
 	unsigned char val;
 	unsigned long flags;
@@ -130,7 +130,7 @@
 	return val;
 }
 
-static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
+static void indirect_nvram_write_byte(int addr, unsigned char val)
 {
 	unsigned long flags;
 
@@ -143,13 +143,13 @@
 
 #ifdef CONFIG_ADB_PMU
 
-static void __pmac pmu_nvram_complete(struct adb_request *req)
+static void pmu_nvram_complete(struct adb_request *req)
 {
 	if (req->arg)
 		complete((struct completion *)req->arg);
 }
 
-static unsigned char __pmac pmu_nvram_read_byte(int addr)
+static unsigned char pmu_nvram_read_byte(int addr)
 {
 	struct adb_request req;
 	DECLARE_COMPLETION(req_complete); 
@@ -165,7 +165,7 @@
 	return req.reply[0];
 }
 
-static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
+static void pmu_nvram_write_byte(int addr, unsigned char val)
 {
 	struct adb_request req;
 	DECLARE_COMPLETION(req_complete); 
@@ -183,7 +183,7 @@
 #endif /* CONFIG_ADB_PMU */
 
 
-static u8 __pmac chrp_checksum(struct chrp_header* hdr)
+static u8 chrp_checksum(struct chrp_header* hdr)
 {
 	u8 *ptr;
 	u16 sum = hdr->signature;
@@ -194,7 +194,7 @@
 	return sum;
 }
 
-static u32 __pmac core99_calc_adler(u8 *buffer)
+static u32 core99_calc_adler(u8 *buffer)
 {
 	int cnt;
 	u32 low, high;
@@ -216,7 +216,7 @@
 	return (high << 16) | low;
 }
 
-static u32 __pmac core99_check(u8* datas)
+static u32 core99_check(u8* datas)
 {
 	struct core99_header* hdr99 = (struct core99_header*)datas;
 
@@ -235,7 +235,7 @@
 	return hdr99->generation;
 }
 
-static int __pmac sm_erase_bank(int bank)
+static int sm_erase_bank(int bank)
 {
 	int stat, i;
 	unsigned long timeout;
@@ -267,7 +267,7 @@
 	return 0;
 }
 
-static int __pmac sm_write_bank(int bank, u8* datas)
+static int sm_write_bank(int bank, u8* datas)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -302,7 +302,7 @@
 	return 0;
 }
 
-static int __pmac amd_erase_bank(int bank)
+static int amd_erase_bank(int bank)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -349,7 +349,7 @@
 	return 0;
 }
 
-static int __pmac amd_write_bank(int bank, u8* datas)
+static int amd_write_bank(int bank, u8* datas)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -430,7 +430,7 @@
 	DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
 }
 
-static void __pmac core99_nvram_sync(void)
+static void core99_nvram_sync(void)
 {
 	struct core99_header* hdr99;
 	unsigned long flags;
@@ -554,12 +554,12 @@
 	lookup_partitions();
 }
 
-int __pmac pmac_get_partition(int partition)
+int pmac_get_partition(int partition)
 {
 	return nvram_partitions[partition];
 }
 
-u8 __pmac pmac_xpram_read(int xpaddr)
+u8 pmac_xpram_read(int xpaddr)
 {
 	int offset = nvram_partitions[pmac_nvram_XPRAM];
 
@@ -569,7 +569,7 @@
 	return ppc_md.nvram_read_val(xpaddr + offset);
 }
 
-void __pmac pmac_xpram_write(int xpaddr, u8 data)
+void pmac_xpram_write(int xpaddr, u8 data)
 {
 	int offset = nvram_partitions[pmac_nvram_XPRAM];
 
diff --git a/arch/ppc/platforms/pmac_pci.c b/arch/ppc/platforms/pmac_pci.c
index 719fb49..1dc638f 100644
--- a/arch/ppc/platforms/pmac_pci.c
+++ b/arch/ppc/platforms/pmac_pci.c
@@ -141,7 +141,7 @@
 	|(((unsigned long)(off)) & 0xFCUL) \
 	|1UL)
 
-static void volatile __iomem * __pmac
+static void volatile __iomem *
 macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
 {
 	unsigned int caddr;
@@ -162,7 +162,7 @@
 	return hose->cfg_data + offset;
 }
 
-static int __pmac
+static int
 macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		    int len, u32 *val)
 {
@@ -190,7 +190,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		     int len, u32 val)
 {
@@ -230,7 +230,7 @@
 /*
- * Verifiy that a specific (bus, dev_fn) exists on chaos
+ * Verify that a specific (bus, dev_fn) exists on chaos
  */
-static int __pmac
+static int
 chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
 {
 	struct device_node *np;
@@ -252,7 +252,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		  int len, u32 *val)
 {
@@ -264,7 +264,7 @@
 	return macrisc_read_config(bus, devfn, offset, len, val);
 }
 
-static int __pmac
+static int
 chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		   int len, u32 val)
 {
@@ -294,7 +294,7 @@
 		+ (((unsigned long)bus) << 16) \
 		+ 0x01000000UL)
 
-static void volatile __iomem * __pmac
+static void volatile __iomem *
 u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
 {
 	if (bus == hose->first_busno) {
@@ -307,7 +307,7 @@
 		return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
 }
 
-static int __pmac
+static int
 u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		    int len, u32 *val)
 {
@@ -357,7 +357,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac
+static int
 u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		     int len, u32 val)
 {
@@ -899,7 +899,7 @@
 	pcibios_fixup_OF_interrupts();
 }
 
-int __pmac
+int
 pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
 {
 	struct device_node* node;
@@ -1096,7 +1096,7 @@
  * Disable second function on K2-SATA, it's broken
  * and disable IO BARs on first one
  */
-void __pmac pmac_pci_fixup_k2_sata(struct pci_dev* dev)
+void pmac_pci_fixup_k2_sata(struct pci_dev* dev)
 {
 	int i;
 	u16 cmd;
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 2ce0588..3349cfb 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -53,7 +53,7 @@
 };
 
 /* Default addresses */
-static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
+static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
         (struct pmac_irq_hw *) 0xf3000020,
         (struct pmac_irq_hw *) 0xf3000010,
         (struct pmac_irq_hw *) 0xf4000020,
@@ -64,22 +64,22 @@
 #define OHARE_LEVEL_MASK	0x1ff00000
 #define HEATHROW_LEVEL_MASK	0x1ff00000
 
-static int max_irqs __pmacdata;
-static int max_real_irqs __pmacdata;
-static u32 level_mask[4] __pmacdata;
+static int max_irqs;
+static int max_real_irqs;
+static u32 level_mask[4];
 
-static DEFINE_SPINLOCK(pmac_pic_lock __pmacdata);
+static DEFINE_SPINLOCK(pmac_pic_lock);
 
 
 #define GATWICK_IRQ_POOL_SIZE        10
-static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE] __pmacdata;
+static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
 
 /*
  * Mark an irq as "lost".  This is only used on the pmac
  * since it can lose interrupts (see pmac_set_irq_mask).
  * -- Cort
  */
-void __pmac
+void
 __set_lost(unsigned long irq_nr, int nokick)
 {
 	if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
@@ -89,7 +89,7 @@
 	}
 }
 
-static void __pmac
+static void
 pmac_mask_and_ack_irq(unsigned int irq_nr)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
@@ -114,7 +114,7 @@
 	spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 
-static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
         int i = irq_nr >> 5;
@@ -147,7 +147,7 @@
 /* When an irq gets requested for the first client, if it's an
  * edge interrupt, we clear any previous one on the controller
  */
-static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
+static unsigned int pmac_startup_irq(unsigned int irq_nr)
 {
         unsigned long bit = 1UL << (irq_nr & 0x1f);
         int i = irq_nr >> 5;
@@ -160,20 +160,20 @@
 	return 0;
 }
 
-static void __pmac pmac_mask_irq(unsigned int irq_nr)
+static void pmac_mask_irq(unsigned int irq_nr)
 {
         clear_bit(irq_nr, ppc_cached_irq_mask);
         pmac_set_irq_mask(irq_nr, 0);
         mb();
 }
 
-static void __pmac pmac_unmask_irq(unsigned int irq_nr)
+static void pmac_unmask_irq(unsigned int irq_nr)
 {
         set_bit(irq_nr, ppc_cached_irq_mask);
         pmac_set_irq_mask(irq_nr, 0);
 }
 
-static void __pmac pmac_end_irq(unsigned int irq_nr)
+static void pmac_end_irq(unsigned int irq_nr)
 {
 	if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
 	    && irq_desc[irq_nr].action) {
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index 4c56a47..e6a1218 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -123,7 +123,7 @@
 extern struct smp_ops_t core99_smp_ops;
 #endif /* CONFIG_SMP */
 
-static int __pmac
+static int
 pmac_show_cpuinfo(struct seq_file *m)
 {
 	struct device_node *np;
@@ -227,7 +227,7 @@
 	return 0;
 }
 
-static int __openfirmware
+static int
 pmac_show_percpuinfo(struct seq_file *m, int i)
 {
 #ifdef CONFIG_CPU_FREQ_PMAC
@@ -448,7 +448,7 @@
 	enable_kernel_fp();
 
 #ifdef CONFIG_ALTIVEC
-	if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
+	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
 		enable_kernel_altivec();
 #endif /* CONFIG_ALTIVEC */
 
@@ -486,7 +486,7 @@
 late_initcall(pmac_late_init);
 
 /* can't be __init - can be called whenever a disk is first accessed */
-void __pmac
+void
 note_bootable_part(dev_t dev, int part, int goodness)
 {
 	static int found_boot = 0;
@@ -512,7 +512,7 @@
 	}
 }
 
-static void __pmac
+static void
 pmac_restart(char *cmd)
 {
 #ifdef CONFIG_ADB_CUDA
@@ -537,7 +537,7 @@
 	}
 }
 
-static void __pmac
+static void
 pmac_power_off(void)
 {
 #ifdef CONFIG_ADB_CUDA
@@ -562,7 +562,7 @@
 	}
 }
 
-static void __pmac
+static void
 pmac_halt(void)
 {
    pmac_power_off();
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 794a239..e613f0e 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -186,7 +186,7 @@
  */
 static unsigned long psurge_smp_message[NR_CPUS];
 
-void __pmac psurge_smp_message_recv(struct pt_regs *regs)
+void psurge_smp_message_recv(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 	int msg;
@@ -203,13 +203,13 @@
 			smp_message_recv(msg, regs);
 }
 
-irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
 {
 	psurge_smp_message_recv(regs);
 	return IRQ_HANDLED;
 }
 
-static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data,
+static void smp_psurge_message_pass(int target, int msg, unsigned long data,
 					   int wait)
 {
 	int i;
@@ -629,7 +629,7 @@
 
 
 /* PowerSurge-style Macs */
-struct smp_ops_t psurge_smp_ops __pmacdata = {
+struct smp_ops_t psurge_smp_ops = {
 	.message_pass	= smp_psurge_message_pass,
 	.probe		= smp_psurge_probe,
 	.kick_cpu	= smp_psurge_kick_cpu,
@@ -639,7 +639,7 @@
 };
 
 /* Core99 Macs (dual G4s) */
-struct smp_ops_t core99_smp_ops __pmacdata = {
+struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_openpic_message_pass,
 	.probe		= smp_core99_probe,
 	.kick_cpu	= smp_core99_kick_cpu,
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index 778ce4f..ff6adff 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -77,7 +77,7 @@
 #endif
 }
 
-unsigned long __pmac
+unsigned long
 pmac_get_rtc_time(void)
 {
 #if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -118,7 +118,7 @@
 	return 0;
 }
 
-int __pmac
+int
 pmac_set_rtc_time(unsigned long nowtime)
 {
 #if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -210,7 +210,7 @@
 /*
  * Reset the time after a sleep.
  */
-static int __pmac
+static int
 time_sleep_notify(struct pmu_sleep_notifier *self, int when)
 {
 	static unsigned long time_diff;
@@ -235,7 +235,7 @@
 	return PBOOK_SLEEP_OK;
 }
 
-static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = {
+static struct pmu_sleep_notifier time_sleep_notifier = {
 	time_sleep_notify, SLEEP_LEVEL_MISC,
 };
 #endif /* CONFIG_PM */
diff --git a/arch/ppc/platforms/prep_pci.c b/arch/ppc/platforms/prep_pci.c
index 4760cb6..e50b999 100644
--- a/arch/ppc/platforms/prep_pci.c
+++ b/arch/ppc/platforms/prep_pci.c
@@ -43,7 +43,7 @@
 /* Tables for known hardware */
 
 /* Motorola PowerStackII - Utah */
-static char Utah_pci_IRQ_map[23] __prepdata =
+static char Utah_pci_IRQ_map[23] =
 {
         0,   /* Slot 0  - unused */
         0,   /* Slot 1  - unused */
@@ -72,7 +72,7 @@
         0,   /* Slot 22 - unused */
 };
 
-static char Utah_pci_IRQ_routes[] __prepdata =
+static char Utah_pci_IRQ_routes[] =
 {
         0,   /* Line 0 - Unused */
         9,   /* Line 1 */
@@ -84,7 +84,7 @@
 
 /* Motorola PowerStackII - Omaha */
 /* no integrated SCSI or ethernet */
-static char Omaha_pci_IRQ_map[23] __prepdata =
+static char Omaha_pci_IRQ_map[23] =
 {
         0,   /* Slot 0  - unused */
         0,   /* Slot 1  - unused */
@@ -111,7 +111,7 @@
         0,
 };
 
-static char Omaha_pci_IRQ_routes[] __prepdata =
+static char Omaha_pci_IRQ_routes[] =
 {
         0,   /* Line 0 - Unused */
         9,   /* Line 1 */
@@ -121,7 +121,7 @@
 };
 
 /* Motorola PowerStack */
-static char Blackhawk_pci_IRQ_map[19] __prepdata =
+static char Blackhawk_pci_IRQ_map[19] =
 {
   	0,	/* Slot 0  - unused */
   	0,	/* Slot 1  - unused */
@@ -144,7 +144,7 @@
  	3,	/* Slot P5 */
 };
 
-static char Blackhawk_pci_IRQ_routes[] __prepdata =
+static char Blackhawk_pci_IRQ_routes[] =
 {
    	0,	/* Line 0 - Unused */
    	9,	/* Line 1 */
@@ -154,7 +154,7 @@
 };
 
 /* Motorola Mesquite */
-static char Mesquite_pci_IRQ_map[23] __prepdata =
+static char Mesquite_pci_IRQ_map[23] =
 {
 	0,	/* Slot 0  - unused */
 	0,	/* Slot 1  - unused */
@@ -182,7 +182,7 @@
 };
 
 /* Motorola Sitka */
-static char Sitka_pci_IRQ_map[21] __prepdata =
+static char Sitka_pci_IRQ_map[21] =
 {
 	0,      /* Slot 0  - unused */
 	0,      /* Slot 1  - unused */
@@ -208,7 +208,7 @@
 };
 
 /* Motorola MTX */
-static char MTX_pci_IRQ_map[23] __prepdata =
+static char MTX_pci_IRQ_map[23] =
 {
 	0,	/* Slot 0  - unused */
 	0,	/* Slot 1  - unused */
@@ -237,7 +237,7 @@
 
 /* Motorola MTX Plus */
 /* Secondary bus interrupt routing is not supported yet */
-static char MTXplus_pci_IRQ_map[23] __prepdata =
+static char MTXplus_pci_IRQ_map[23] =
 {
         0,      /* Slot 0  - unused */
         0,      /* Slot 1  - unused */
@@ -264,13 +264,13 @@
         0,      /* Slot 22 - unused */
 };
 
-static char Raven_pci_IRQ_routes[] __prepdata =
+static char Raven_pci_IRQ_routes[] =
 {
    	0,	/* This is a dummy structure */
 };
 
 /* Motorola MVME16xx */
-static char Genesis_pci_IRQ_map[16] __prepdata =
+static char Genesis_pci_IRQ_map[16] =
 {
   	0,	/* Slot 0  - unused */
   	0,	/* Slot 1  - unused */
@@ -290,7 +290,7 @@
   	0,	/* Slot 15 - unused */
 };
 
-static char Genesis_pci_IRQ_routes[] __prepdata =
+static char Genesis_pci_IRQ_routes[] =
 {
    	0,	/* Line 0 - Unused */
    	10,	/* Line 1 */
@@ -299,7 +299,7 @@
    	15	/* Line 4 */
 };
 
-static char Genesis2_pci_IRQ_map[23] __prepdata =
+static char Genesis2_pci_IRQ_map[23] =
 {
 	0,	/* Slot 0  - unused */
 	0,	/* Slot 1  - unused */
@@ -327,7 +327,7 @@
 };
 
 /* Motorola Series-E */
-static char Comet_pci_IRQ_map[23] __prepdata =
+static char Comet_pci_IRQ_map[23] =
 {
   	0,	/* Slot 0  - unused */
   	0,	/* Slot 1  - unused */
@@ -354,7 +354,7 @@
 	0,
 };
 
-static char Comet_pci_IRQ_routes[] __prepdata =
+static char Comet_pci_IRQ_routes[] =
 {
    	0,	/* Line 0 - Unused */
    	10,	/* Line 1 */
@@ -364,7 +364,7 @@
 };
 
 /* Motorola Series-EX */
-static char Comet2_pci_IRQ_map[23] __prepdata =
+static char Comet2_pci_IRQ_map[23] =
 {
 	0,	/* Slot 0  - unused */
 	0,	/* Slot 1  - unused */
@@ -391,7 +391,7 @@
 	0,
 };
 
-static char Comet2_pci_IRQ_routes[] __prepdata =
+static char Comet2_pci_IRQ_routes[] =
 {
 	0,	/* Line 0 - Unused */
 	10,	/* Line 1 */
@@ -405,7 +405,7 @@
  * This is actually based on the Carolina motherboard
  * -- Cort
  */
-static char ibm8xx_pci_IRQ_map[23] __prepdata = {
+static char ibm8xx_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -431,7 +431,7 @@
         2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
 };
 
-static char ibm8xx_pci_IRQ_routes[] __prepdata = {
+static char ibm8xx_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         15,     /* Line 1 */
         15,     /* Line 2 */
@@ -443,7 +443,7 @@
  * a 6015 ibm board
  * -- Cort
  */
-static char ibm6015_pci_IRQ_map[23] __prepdata = {
+static char ibm6015_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -469,7 +469,7 @@
         2, /* Slot 22 -  */
 };
 
-static char ibm6015_pci_IRQ_routes[] __prepdata = {
+static char ibm6015_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         13,     /* Line 1 */
         15,     /* Line 2 */
@@ -479,7 +479,7 @@
 
 
 /* IBM Nobis and Thinkpad 850 */
-static char Nobis_pci_IRQ_map[23] __prepdata ={
+static char Nobis_pci_IRQ_map[23] ={
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -498,7 +498,7 @@
         0, /* Slot 15 - unused */
 };
 
-static char Nobis_pci_IRQ_routes[] __prepdata = {
+static char Nobis_pci_IRQ_routes[] = {
         0, /* Line 0 - Unused */
         13, /* Line 1 */
         13, /* Line 2 */
@@ -510,7 +510,7 @@
  * IBM RS/6000 43p/140  -- paulus
  * XXX we should get all this from the residual data
  */
-static char ibm43p_pci_IRQ_map[23] __prepdata = {
+static char ibm43p_pci_IRQ_map[23] = {
         0, /* Slot 0  - unused */
         0, /* Slot 1  - unused */
         0, /* Slot 2  - unused */
@@ -536,7 +536,7 @@
         1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
 };
 
-static char ibm43p_pci_IRQ_routes[] __prepdata = {
+static char ibm43p_pci_IRQ_routes[] = {
         0,      /* Line 0 - unused */
         15,     /* Line 1 */
         15,     /* Line 2 */
@@ -559,7 +559,7 @@
  * are routed to OpenPIC inputs 5-8.  These values are offset by
  * 16 in the table to reflect the Linux kernel interrupt value.
  */
-struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
+struct powerplus_irq_list Powerplus_pci_IRQ_list =
 {
 	{25, 26, 27, 28},
 	{21, 22, 23, 24}
@@ -572,7 +572,7 @@
  * are routed to OpenPIC inputs 12-15. These values are offset by
  * 16 in the table to reflect the Linux kernel interrupt value.
  */
-struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
+struct powerplus_irq_list Mesquite_pci_IRQ_list =
 {
 	{24, 25, 26, 27},
 	{28, 29, 30, 31}
@@ -582,7 +582,7 @@
  * This table represents the standard PCI swizzle defined in the
  * PCI bus specification.
  */
-static unsigned char prep_pci_intpins[4][4] __prepdata =
+static unsigned char prep_pci_intpins[4][4] =
 {
 	{ 1, 2, 3, 4},  /* Buses 0, 4, 8, ... */
 	{ 2, 3, 4, 1},  /* Buses 1, 5, 9, ... */
@@ -622,7 +622,7 @@
 #define MIN_DEVNR	11
 #define MAX_DEVNR	22
 
-static int __prep
+static int
 prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		 int len, u32 *val)
 {
@@ -652,7 +652,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __prep
+static int
 prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
 		  int len, u32 val)
 {
@@ -804,7 +804,7 @@
 	void            (*map_non0_bus)(struct pci_dev *);      /* For boards with more than bus 0 devices. */
 	struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */
 	unsigned char   secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */
-} mot_info[] __prepdata = {
+} mot_info[] = {
 	{0x300, 0x00, 0x00, "MVME 2400",			Genesis2_pci_IRQ_map,	Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF},
 	{0x010, 0x00, 0x00, "Genesis",				Genesis_pci_IRQ_map,	Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00},
 	{0x020, 0x00, 0x00, "Powerstack (Series E)",		Comet_pci_IRQ_map,	Comet_pci_IRQ_routes, NULL, NULL, 0x00},
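
The prep_pci_intpins[4][4] table earlier in this file encodes the standard PCI interrupt swizzle: INTA..INTD rotate by one position for each step in bus (or slot) number, which is exactly what the four rotated rows hold. A hedged sketch of the equivalent arithmetic:

	/* Sketch: the rotation encoded by prep_pci_intpins[][].  For pin in
	 * 1..4 (INTA..INTD) and a bus or slot number n, the swizzled pin is
	 * a rotation by n positions, matching { {1,2,3,4}, {2,3,4,1}, ... }.
	 */
	static inline int demo_swizzle_pin(int n, int pin)
	{
		return ((pin - 1 + n) % 4) + 1;
	}
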
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index bc926be..8bc734f 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -89,9 +89,6 @@
 #define cached_21	(((char *)(ppc_cached_irq_mask))[3])
 #define cached_A1	(((char *)(ppc_cached_irq_mask))[2])
 
-/* for the mac fs */
-dev_t boot_dev;
-
 #ifdef CONFIG_SOUND_CS4232
 long ppc_cs4232_dma, ppc_cs4232_dma2;
 #endif
@@ -173,7 +170,7 @@
 }
 
 /* cpuinfo code common to all IBM PReP */
-static void __prep
+static void
 prep_ibm_cpuinfo(struct seq_file *m)
 {
 	unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -209,14 +206,14 @@
 	}
 }
 
-static int __prep
+static int
 prep_gen_cpuinfo(struct seq_file *m)
 {
 	prep_ibm_cpuinfo(m);
 	return 0;
 }
 
-static int __prep
+static int
 prep_sandalfoot_cpuinfo(struct seq_file *m)
 {
 	unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -243,7 +240,7 @@
 	return 0;
 }
 
-static int __prep
+static int
 prep_thinkpad_cpuinfo(struct seq_file *m)
 {
 	unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -314,7 +311,7 @@
 	return 0;
 }
 
-static int __prep
+static int
 prep_carolina_cpuinfo(struct seq_file *m)
 {
 	unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -350,7 +347,7 @@
 	return 0;
 }
 
-static int __prep
+static int
 prep_tiger1_cpuinfo(struct seq_file *m)
 {
 	unsigned int l2_reg = inb(PREP_IBM_L2INFO);
@@ -393,7 +390,7 @@
 
 
 /* Used by all Motorola PReP */
-static int __prep
+static int
 prep_mot_cpuinfo(struct seq_file *m)
 {
 	unsigned int cachew = *((unsigned char *)CACHECRBA);
@@ -454,7 +451,7 @@
 	return 0;
 }
 
-static void __prep
+static void
 prep_restart(char *cmd)
 {
 #define PREP_SP92	0x92	/* Special Port 92 */
@@ -473,7 +470,7 @@
 #undef PREP_SP92
 }
 
-static void __prep
+static void
 prep_halt(void)
 {
 	local_irq_disable(); /* no interrupts */
@@ -488,7 +485,7 @@
 /* Carrera is the power manager in the Thinkpads. Unfortunately not much is
  * known about it, so we can't power down.
  */
-static void __prep
+static void
 prep_carrera_poweroff(void)
 {
 	prep_halt();
@@ -501,7 +498,7 @@
  * somewhat in the IBM Carolina Technical Specification.
  * -Hollis
  */
-static void __prep
+static void
 utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
 {
 	/*
@@ -539,7 +536,7 @@
 	udelay(100);				/* important: let controller recover */
 }
 
-static void __prep
+static void
 prep_sig750_poweroff(void)
 {
 	/* tweak the power manager found in most IBM PRePs (except Thinkpads) */
@@ -554,7 +551,7 @@
 	/* not reached */
 }
 
-static int __prep
+static int
 prep_show_percpuinfo(struct seq_file *m, int i)
 {
 	/* PREP's without residual data will give incorrect values here */
@@ -700,12 +697,12 @@
 /*
  * IBM 3-digit status LED
  */
-static unsigned int ibm_statusled_base __prepdata;
+static unsigned int ibm_statusled_base;
 
-static void __prep
+static void
 ibm_statusled_progress(char *s, unsigned short hex);
 
-static int __prep
+static int
 ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
 		    void * dummy3)
 {
@@ -713,13 +710,13 @@
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block ibm_statusled_block __prepdata = {
+static struct notifier_block ibm_statusled_block = {
 	ibm_statusled_panic,
 	NULL,
 	INT_MAX /* try to do it first */
 };
 
-static void __prep
+static void
 ibm_statusled_progress(char *s, unsigned short hex)
 {
 	static int notifier_installed;
@@ -945,7 +942,7 @@
 		todc_calibrate_decr();
 }
 
-static unsigned int __prep
+static unsigned int
 prep_irq_canonicalize(u_int irq)
 {
 	if (irq == 2)
@@ -996,7 +993,7 @@
 /*
  * IDE stuff.
  */
-static int __prep
+static int
 prep_ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -1010,7 +1007,7 @@
 	}
 }
 
-static unsigned long __prep
+static unsigned long
 prep_ide_default_io_base(int index)
 {
 	switch (index) {
@@ -1055,7 +1052,7 @@
 		do_openpic_setup_cpu();
 }
 
-static struct smp_ops_t prep_smp_ops __prepdata = {
+static struct smp_ops_t prep_smp_ops = {
 	smp_openpic_message_pass,
 	smp_prep_probe,
 	smp_prep_kick_cpu,
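
prep_smp_ops just above uses positional initializers where the pmac tables earlier in the patch use designated ones; either way the table is the per-platform SMP operations vector that the generic SMP code dispatches through. A sketch of the shape of such a vector; the real smp_ops_t of this era has further members (timebase sync hooks and so on), so this is the abridged idea only:

	/* Sketch: per-platform SMP hooks gathered in one dispatch table. */
	struct demo_smp_ops {
		void (*message_pass)(int target, int msg,
				     unsigned long data, int wait);
		int  (*probe)(void);		/* count/locate secondary CPUs */
		void (*kick_cpu)(int nr);	/* start CPU nr into the kernel */
		void (*setup_cpu)(int nr);	/* per-CPU interrupt setup */
	};
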
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index c30607a..06ec30f 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -1185,18 +1185,18 @@
 		ROOT_DEV = Root_HDA1;
 #endif
 
-	if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450) ||
-	    (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR))
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_SPEC7450) ||
+	    (cur_cpu_spec->cpu_features & CPU_FTR_L3CR))
 		/* 745x is different.  We only want to pass along enable. */
 		_set_L2CR(L2CR_L2E);
-	else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR)
+	else if (cur_cpu_spec->cpu_features & CPU_FTR_L2CR)
 		/* All modules have 1MB of L2.  We also assume that an
 		 * L2 divisor of 3 will work.
 		 */
 		_set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3
 			  | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
 
-	if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR)
+	if (cur_cpu_spec->cpu_features & CPU_FTR_L3CR)
 		/* No L3 cache */
 		_set_L3CR(0);
 
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c
index 0f84ca6..c991160 100644
--- a/arch/ppc/platforms/residual.c
+++ b/arch/ppc/platforms/residual.c
@@ -47,7 +47,7 @@
 #include <asm/ide.h>
 
 
-unsigned char __res[sizeof(RESIDUAL)] __prepdata = {0,};
+unsigned char __res[sizeof(RESIDUAL)] = {0,};
 RESIDUAL *res = (RESIDUAL *)&__res;
 
 char * PnP_BASE_TYPES[] __initdata = {
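
The cur_cpu_spec[0]-> to cur_cpu_spec-> substitution in the radstone file above (repeated in ibm440gx_common.c below) tracks a declaration change in the merged tree: what the old 32-bit code exposed as an array of cpu_spec pointers becomes a single pointer to the current entry, so the [0] indexing goes away. For contrast, a sketch; the exact old declaration is an assumption:

	/* Sketch: why the [0] disappears. */
	struct demo_cpu_spec { unsigned long cpu_features; };

	struct demo_cpu_spec *demo_spec_old[1];	/* callers: demo_spec_old[0]->cpu_features */
	struct demo_cpu_spec *demo_spec_new;	/* callers: demo_spec_new->cpu_features */
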
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index b8d08f3..1b0a849 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -5,6 +5,7 @@
 CFLAGS_prom_init.o      += -fPIC
 CFLAGS_btext.o          += -fPIC
 
+ifneq ($(CONFIG_PPC_MERGE),y)
 wdt-mpc8xx-$(CONFIG_8xx_WDT)	+= m8xx_wdt.o
 
 obj-$(CONFIG_PPCBUG_NVRAM)	+= prep_nvram.o
@@ -109,3 +110,16 @@
 ifeq ($(CONFIG_PPC_MPC52xx),y)
 obj-$(CONFIG_PCI)		+= mpc52xx_pci.o
 endif
+
+else
+# Stuff still needed by the merged powerpc sources
+
+obj-$(CONFIG_PPCBUG_NVRAM)	+= prep_nvram.o
+obj-$(CONFIG_PPC_OF)		+= prom_init.o prom.o of_device.o
+obj-$(CONFIG_PPC_PMAC)		+= indirect_pci.o
+obj-$(CONFIG_PPC_CHRP)		+= indirect_pci.o i8259.o
+obj-$(CONFIG_PPC_PREP)		+= indirect_pci.o i8259.o todc_time.o
+obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
+obj-$(CONFIG_MPC10X_BRIDGE)	+= mpc10x_common.o indirect_pci.o ppc_sys.o
+
+endif
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index 7734f68..12fa83e 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -53,8 +53,8 @@
  * chrp only uses it during early boot.
  */
 #ifdef CONFIG_XMON
-#define BTEXT	__pmac
-#define BTDATA	__pmacdata
+#define BTEXT
+#define BTDATA
 #else
 #define BTEXT	__init
 #define BTDATA	__initdata
@@ -187,7 +187,7 @@
  *    changes.
  */
 
-void __openfirmware
+void
 map_boot_text(void)
 {
 	unsigned long base, offset, size;
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
index 0bb9198..c36db27 100644
--- a/arch/ppc/syslib/ibm440gx_common.c
+++ b/arch/ppc/syslib/ibm440gx_common.c
@@ -236,9 +236,9 @@
 	/* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
 	   enable it on all other revisions
 	 */
-	if (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. A") == 0 ||
-			strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. B") == 0
-			|| (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. C")
+	if (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. A") == 0 ||
+			strcmp(cur_cpu_spec->cpu_name, "440GX Rev. B") == 0
+			|| (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. C")
 				== 0 && p->cpu > 667000000))
 		ibm440gx_l2c_disable();
 	else
diff --git a/arch/ppc/syslib/prep_nvram.c b/arch/ppc/syslib/prep_nvram.c
index 8599850..2c6364d 100644
--- a/arch/ppc/syslib/prep_nvram.c
+++ b/arch/ppc/syslib/prep_nvram.c
@@ -22,14 +22,14 @@
 static char nvramData[MAX_PREP_NVRAM];
 static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
 
-unsigned char __prep prep_nvram_read_val(int addr)
+unsigned char prep_nvram_read_val(int addr)
 {
 	outb(addr, PREP_NVRAM_AS0);
 	outb(addr>>8, PREP_NVRAM_AS1);
 	return inb(PREP_NVRAM_DATA);
 }
 
-void __prep prep_nvram_write_val(int           addr,
+void prep_nvram_write_val(int           addr,
 			  unsigned char val)
 {
 	outb(addr, PREP_NVRAM_AS0);
@@ -81,8 +81,7 @@
 	}
 }
 
-__prep
-char __prep *prep_nvram_get_var(const char *name)
+char *prep_nvram_get_var(const char *name)
 {
 	char *cp;
 	int  namelen;
@@ -101,8 +100,7 @@
 	return NULL;
 }
 
-__prep
-char __prep *prep_nvram_first_var(void)
+char *prep_nvram_first_var(void)
 {
         if (nvram->Header.GELength == 0) {
 		return NULL;
@@ -112,8 +110,7 @@
 	}
 }
 
-__prep
-char __prep *prep_nvram_next_var(char *name)
+char *prep_nvram_next_var(char *name)
 {
 	char *cp;
 
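
prep_nvram_get_var/first_var/next_var above walk the GE area of PReP NVRAM (see the Header.GELength check), which packs NUL-terminated name=value strings back to back. A minimal sketch of that access pattern, with area/len standing in for the GE region and its length; names here are illustrative, not the kernel's:

	#include <string.h>

	/* Sketch: step to the next NUL-terminated "name=value" record. */
	static char *demo_next_var(char *area, size_t len, char *cur)
	{
		char *end = area + len;

		cur += strlen(cur) + 1;		/* skip record plus its NUL */
		return (cur < end && *cur) ? cur : NULL;
	}
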
diff --git a/arch/ppc/syslib/prom.c b/arch/ppc/syslib/prom.c
index 2c64ed6..278da6e 100644
--- a/arch/ppc/syslib/prom.c
+++ b/arch/ppc/syslib/prom.c
@@ -89,7 +89,7 @@
 extern boot_infos_t *boot_infos;
 unsigned long dev_tree_size;
 
-void __openfirmware
+void
 phys_call_rtas(int service, int nargs, int nret, ...)
 {
 	va_list list;
@@ -862,7 +862,7 @@
 /*
  * Returns all nodes linked together
  */
-struct device_node * __openfirmware
+struct device_node *
 find_all_nodes(void)
 {
 	struct device_node *head, **prevp, *np;
@@ -1165,7 +1165,7 @@
 /*
  * Add a property to a node
  */
-void __openfirmware
+void
 prom_add_property(struct device_node* np, struct property* prop)
 {
 	struct property **next = &np->properties;
@@ -1177,7 +1177,7 @@
 }
 
 /* I quickly hacked that one, check against spec ! */
-static inline unsigned long __openfirmware
+static inline unsigned long
 bus_space_to_resource_flags(unsigned int bus_space)
 {
 	u8 space = (bus_space >> 24) & 0xf;
@@ -1194,7 +1194,7 @@
 	}
 }
 
-static struct resource* __openfirmware
+static struct resource*
 find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
 {
 	unsigned long mask;
@@ -1224,7 +1224,7 @@
  * or other nodes attached to the root node. Ultimately, put some
  * link to resources in the OF node.
  */
-struct resource* __openfirmware
+struct resource*
 request_OF_resource(struct device_node* node, int index, const char* name_postfix)
 {
 	struct pci_dev* pcidev;
@@ -1280,7 +1280,7 @@
 	return NULL;
 }
 
-int __openfirmware
+int
 release_OF_resource(struct device_node* node, int index)
 {
 	struct pci_dev* pcidev;
@@ -1346,7 +1346,7 @@
 }
 
 #if 0
-void __openfirmware
+void
 print_properties(struct device_node *np)
 {
 	struct property *pp;
@@ -1400,7 +1400,7 @@
 static DEFINE_SPINLOCK(rtas_lock);
 
 /* this can be called after setup -- Cort */
-int __openfirmware
+int
 call_rtas(const char *service, int nargs, int nret,
 	  unsigned long *outputs, ...)
 {
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index c658650..2462121 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -357,7 +357,6 @@
 
 config PROC_DEVICETREE
 	bool "Support for Open Firmware device tree in /proc"
-	depends on !PPC_ISERIES
 	help
 	  This option adds a device-tree directory under /proc which contains
 	  an image of the device tree that the kernel copies from Open
@@ -461,7 +460,7 @@
 	depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
 	default y
 
-source "arch/ppc64/oprofile/Kconfig"
+source "arch/powerpc/oprofile/Kconfig"
 
 source "arch/ppc64/Kconfig.debug"
 
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 521c2a5..d4eb55f 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -84,8 +84,9 @@
 libs-y				+= arch/ppc64/lib/
 core-y				+= arch/ppc64/kernel/
 core-y				+= arch/ppc64/mm/
+core-y				+= arch/powerpc/platforms/
 core-$(CONFIG_XMON)		+= arch/ppc64/xmon/
-drivers-$(CONFIG_OPROFILE)	+= arch/ppc64/oprofile/
+drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
 
 boot := arch/ppc64/boot
 
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b1..0000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2001 Mike Corrigan IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/iSeries/HvLpEvent.h>
-#include <asm/iSeries/HvCallEvent.h>
-#include <asm/iSeries/ItLpNaca.h>
-
-/* Array of LpEvent handler functions */
-LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
-unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
-
-/* Register a handler for an LpEvent type */
-
-int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
-{
-	int rc = 1;
-	if ( eventType < HvLpEvent_Type_NumTypes ) {
-		lpEventHandler[eventType] = handler;
-		rc = 0;
-	}
-	return rc;
-	
-}
-
-int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
-{
-	int rc = 1;
-
-	might_sleep();
-
-	if ( eventType < HvLpEvent_Type_NumTypes ) {
-		if ( !lpEventHandlerPaths[eventType] ) {
-			lpEventHandler[eventType] = NULL;
-			rc = 0;
-
-			/* We now sleep until all other CPUs have scheduled. This ensures that
-			 * the deletion is seen by all other CPUs, and that the deleted handler
-			 * isn't still running on another CPU when we return. */
-			synchronize_rcu();
-		}
-	}
-	return rc;
-}
-EXPORT_SYMBOL(HvLpEvent_registerHandler);
-EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
-
-/* (lpIndex is the partition index of the target partition.  
- * needed only for VirtualIo, VirtualLan and SessionMgr.  Zero
- * indicates to use our partition index - for the other types)
- */
-int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
-{
-	int rc = 1;
-	if ( eventType < HvLpEvent_Type_NumTypes &&
-	     lpEventHandler[eventType] ) {
-		if ( lpIndex == 0 )
-			lpIndex = itLpNaca.xLpIndex;
-		HvCallEvent_openLpEventPath( lpIndex, eventType );
-		++lpEventHandlerPaths[eventType];
-		rc = 0;
-	}
-	return rc;
-}
-
-int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
-{
-	int rc = 1;
-	if ( eventType < HvLpEvent_Type_NumTypes &&
-	     lpEventHandler[eventType] &&
-	     lpEventHandlerPaths[eventType] ) {
-		if ( lpIndex == 0 )
-			lpIndex = itLpNaca.xLpIndex;
-		HvCallEvent_closeLpEventPath( lpIndex, eventType );
-		--lpEventHandlerPaths[eventType];
-		rc = 0;
-	}
-	return rc;
-}
-
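
The HvLpEvent.c deletion above appears to be movement rather than removal: the iSeries low-level event code migrates with the rest of this merge, and the kernel/Makefile hunk below stops building it from ppc64 (compare the head.S comment further down, which now points at a lowercase lpardata.c). Its unregister path deserves a note: it clears the handler pointer and then calls synchronize_rcu() so that no CPU can still be executing the old handler when unregistration returns. A minimal sketch of that pattern, assuming the dispatch side runs under the usual RCU read-side rules:

	#include <linux/rcupdate.h>

	static void (*demo_handler)(int event);

	static void demo_dispatch(int event)
	{
		void (*h)(int);

		rcu_read_lock();
		h = rcu_dereference(demo_handler);
		if (h)
			h(event);
		rcu_read_unlock();
	}

	static void demo_unregister(void)
	{
		rcu_assign_pointer(demo_handler, NULL);
		synchronize_rcu();	/* waits out in-flight demo_dispatch() calls */
	}
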
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index ae60eb1..bb5946b 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,23 +11,16 @@
 			udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 			ptrace32.o signal32.o rtc.o init_task.o \
 			lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
-			iommu.o sysfs.o vdso.o pmc.o firmware.o
+			iommu.o sysfs.o vdso.o pmc.o firmware.o prom.o
 obj-y += vdso32/ vdso64/
 
 obj-$(CONFIG_PPC_OF) +=	of_device.o
 
-pci-obj-$(CONFIG_PPC_ISERIES)	+= iSeries_pci.o iSeries_irq.o \
-				iSeries_VpdInfo.o
 pci-obj-$(CONFIG_PPC_MULTIPLATFORM)	+= pci_dn.o pci_direct_iommu.o
 
 obj-$(CONFIG_PCI)	+= pci.o pci_iommu.o iomap.o $(pci-obj-y)
 
-obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \
-			     iSeries_setup.o ItLpQueue.o hvCall.o \
-			     mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
-			     iSeries_iommu.o
-
-obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
+obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o
 
 obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
 			     pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
@@ -45,14 +38,12 @@
 obj-$(CONFIG_PPC_RTAS)		+= rtas.o rtas_pci.o
 obj-$(CONFIG_RTAS_PROC)		+= rtas-proc.o
 obj-$(CONFIG_SCANLOG)		+= scanlog.o
-obj-$(CONFIG_VIOPATH)		+= viopath.o
 obj-$(CONFIG_LPARCFG)		+= lparcfg.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_HVCS)		+= hvcserver.o
 
 vio-obj-$(CONFIG_PPC_PSERIES)	+= pSeries_vio.o
-vio-obj-$(CONFIG_PPC_ISERIES)	+= iSeries_vio.o
 obj-$(CONFIG_IBMVIO)		+= vio.o $(vio-obj-y)
 obj-$(CONFIG_XICS)		+= xics.o
 obj-$(CONFIG_MPIC)		+= mpic.o
@@ -68,7 +59,6 @@
 
 ifdef CONFIG_SMP
 obj-$(CONFIG_PPC_PMAC)		+= pmac_smp.o smp-tbsync.o
-obj-$(CONFIG_PPC_ISERIES)	+= iSeries_smp.o
 obj-$(CONFIG_PPC_PSERIES)	+= pSeries_smp.o
 obj-$(CONFIG_PPC_BPA)		+= pSeries_smp.o
 obj-$(CONFIG_PPC_MAPLE)		+= smp-tbsync.o
@@ -83,3 +73,6 @@
 arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s
 AFLAGS_head.o += -Iarch/ppc64/kernel
 endif
+
+# These are here while we do the architecture merge
+vecemu-y			+= ../../powerpc/kernel/vecemu.o
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index f33a7bc..0cc463f 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
 #include <asm/pmac_feature.h>
 #include <asm/abs_addr.h>
 #include <asm/system.h>
+#include <asm/ppc-pci.h>
 
-#include "pci.h"
 #include "bpa_iommu.h"
 
 static inline unsigned long 
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db6..9f915f4 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,8 @@
 #include <asm/time.h>
 #include <asm/nvram.h>
 #include <asm/cputable.h>
+#include <asm/ppc-pci.h>
 
-#include "pci.h"
 #include "bpa_iic.h"
 #include "bpa_iommu.h"
 
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 8831a28..5134c53 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -37,26 +37,13 @@
 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
 
-
-/* We only set the altivec features if the kernel was compiled with altivec
- * support
- */
-#ifdef CONFIG_ALTIVEC
-#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
-#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
-#else
-#define CPU_FTR_ALTIVEC_COMP	0
-#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
-#endif
-
 struct cpu_spec	cpu_specs[] = {
 	{	/* Power3 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00400000,
 		.cpu_name		= "POWER3 (630)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
-		.cpu_user_features = COMMON_USER_PPC64,
+		.cpu_features		= CPU_FTRS_POWER3,
+		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
@@ -70,8 +57,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00410000,
 		.cpu_name		= "POWER3 (630+)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
+		.cpu_features		= CPU_FTRS_POWER3,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -86,9 +72,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00330000,
 		.cpu_name		= "RS64-II (northstar)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-			CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_features		= CPU_FTRS_RS64,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -103,9 +87,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00340000,
 		.cpu_name		= "RS64-III (pulsar)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-			CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_features		= CPU_FTRS_RS64,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -120,9 +102,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00360000,
 		.cpu_name		= "RS64-III (icestar)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-			CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_features		= CPU_FTRS_RS64,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -137,9 +117,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00370000,
 		.cpu_name		= "RS64-IV (sstar)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
-			CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_features		= CPU_FTRS_RS64,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -154,9 +132,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00350000,
 		.cpu_name		= "POWER4 (gp)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
+		.cpu_features		= CPU_FTRS_POWER4,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -171,9 +147,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00380000,
 		.cpu_name		= "POWER4+ (gq)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
+		.cpu_features		= CPU_FTRS_POWER4,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -188,10 +162,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00390000,
 		.cpu_name		= "PPC970",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-			CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+		.cpu_features		= CPU_FTRS_PPC970,
 		.cpu_user_features	= COMMON_USER_PPC64 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
@@ -207,10 +178,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003c0000,
 		.cpu_name		= "PPC970FX",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-			CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+		.cpu_features		= CPU_FTRS_PPC970,
 		.cpu_user_features	= COMMON_USER_PPC64 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
@@ -226,10 +194,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00440000,
 		.cpu_name		= "PPC970MP",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-			CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+		.cpu_features		= CPU_FTRS_PPC970,
 		.cpu_user_features	= COMMON_USER_PPC64 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
@@ -244,11 +209,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003a0000,
 		.cpu_name		= "POWER5 (gr)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-			CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-			CPU_FTR_MMCRA_SIHV,
+		.cpu_features		= CPU_FTRS_POWER5,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -263,11 +224,7 @@
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003b0000,
 		.cpu_name		= "POWER5 (gs)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-			CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-			CPU_FTR_MMCRA_SIHV,
+		.cpu_features		= CPU_FTRS_POWER5,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -281,11 +238,8 @@
 	{	/* BE DD1.x */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00700000,
-		.cpu_name		= "Broadband Engine",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-			CPU_FTR_SMT,
+		.cpu_name		= "Cell Broadband Engine",
+		.cpu_features		= CPU_FTRS_CELL,
 		.cpu_user_features	= COMMON_USER_PPC64 |
 			PPC_FEATURE_HAS_ALTIVEC_COMP,
 		.icache_bsize		= 128,
@@ -296,9 +250,7 @@
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
 		.cpu_name		= "POWER4 (compatible)",
-		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
-			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-			CPU_FTR_PPCAS_ARCH_V2,
+		.cpu_features		= CPU_FTRS_COMPATIBLE,
 		.cpu_user_features	= COMMON_USER_PPC64,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
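
The cputable.c hunks replace each entry's long OR-list of CPU_FTR_* bits with a named per-family mask (CPU_FTRS_POWER3, CPU_FTRS_RS64, CPU_FTRS_POWER4, CPU_FTRS_PPC970, CPU_FTRS_POWER5, CPU_FTRS_CELL, CPU_FTRS_COMPATIBLE), which both shortens the table and guarantees that identical entries stay identical. A sketch of the macro style; the bit values are placeholders, not the real CPU_FTR_* assignments:

	/* Sketch: one named mask instead of repeating the OR-list per entry. */
	#define DEMO_FTR_SPLIT_ID_CACHE	0x0001UL
	#define DEMO_FTR_USE_TB		0x0002UL
	#define DEMO_FTR_HPTE_TABLE	0x0004UL
	#define DEMO_FTR_IABR		0x0008UL

	#define DEMO_FTRS_POWER3	(DEMO_FTR_SPLIT_ID_CACHE | \
					 DEMO_FTR_USE_TB | \
					 DEMO_FTR_HPTE_TABLE | DEMO_FTR_IABR)

It also lets the CONFIG_ALTIVEC conditional (the deleted CPU_FTR_ALTIVEC_COMP block) move to wherever the masks are now defined, so the table itself no longer carries that #ifdef.
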
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd7..035d1b1 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
 #include <asm/rtas.h>
 #include <asm/atomic.h>
 #include <asm/systemcfg.h>
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #undef DEBUG
 
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 72c6104..db0cd35 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -1253,7 +1253,7 @@
  *
  * On iSeries, the hypervisor must fill in at least one entry before
  * we get control (with relocate on).  The address is give to the hv
- * as a page number (see xLparMap in LparData.c), so this must be at a
+ * as a page number (see xLparMap in lpardata.c), so this must be at a
  * fixed address (the linker can't compute (u64)&initial_stab >>
  * PAGE_SHIFT).
  */
@@ -1364,6 +1364,7 @@
 	addi	r2,r2,0x4000
 
 	bl	.iSeries_early_setup
+	bl	.early_setup
 
 	/* relocation is on at this point */
 
@@ -1970,20 +1971,22 @@
 	blr
 #endif
 
-#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
+#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
 _GLOBAL(smp_release_cpus)
 	/* All secondary cpus are spinning on a common
 	 * spinloop, release them all now so they can start
 	 * to spin on their individual paca spinloops.
 	 * For non SMP kernels, the secondary cpus never
 	 * get out of the common spinloop.
+	 * XXX This does nothing useful on iSeries, secondaries are
+	 * already waiting on their paca.
 	 */
 	li	r3,1
 	LOADADDR(r5,__secondary_hold_spinloop)
 	std	r3,0(r5)
 	sync
 	blr
-#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
+#endif /* CONFIG_SMP */
 
 
 /*
@@ -1992,7 +1995,7 @@
  */
 	.section ".bss"
 
-	.align	12
+	.align	PAGE_SHIFT
 
 	.globl	empty_zero_page
 empty_zero_page:
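
Two details in the head.S hunks: .align 12 becoming .align PAGE_SHIFT (and the matching clrrdi change in misc.S below) stops hard-wiring the 4K page size, and smp_release_cpus() is now built for all SMP/kexec configurations, iSeries included, even though there it only pokes a flag nobody is spinning on, as the new XXX comment says. The release itself is a one-store handshake; a C-level sketch, taking the asm's sync as a store barrier:

	/* Sketch: the spinloop release smp_release_cpus() performs in asm.
	 * Secondaries poll the flag; the boot CPU sets it once, and the
	 * barrier orders the store before whatever boot does next.
	 */
	extern volatile long demo_secondary_hold_spinloop;

	static void demo_release_cpus(void)
	{
		demo_secondary_hold_spinloop = 1;
		__asm__ __volatile__("sync" : : : "memory");
	}
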
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 7e80d49..abb90e6 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -395,7 +395,6 @@
 		if (post_kprobe_handler(args->regs))
 			ret = NOTIFY_STOP;
 		break;
-	case DIE_GPF:
 	case DIE_PAGE_FAULT:
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/ppc64/kernel/maple_pci.c
index 1d297e0..0937649 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/ppc64/kernel/maple_pci.c
@@ -23,8 +23,7 @@
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/iommu.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #ifdef DEBUG
 #define DBG(x...) printk(x)
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index fc05674..2a7fae0 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -59,8 +59,7 @@
 #include <asm/time.h>
 #include <asm/of_device.h>
 #include <asm/lmb.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index e7241ad..d069bbd 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -64,44 +64,6 @@
 _GLOBAL(get_sp)
 	mr	r3,r1
 	blr
-		
-#ifdef CONFIG_PPC_ISERIES
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
-	lbz	r3,PACAPROCENABLED(r13)
-	blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
-	lbz	r3,PACAPROCENABLED(r13)
-	li	r4,0
-	stb	r4,PACAPROCENABLED(r13)
-	blr			/* Done */
-
-/* void local_irq_restore(unsigned long flags) */	
-_GLOBAL(local_irq_restore)
-	lbz	r5,PACAPROCENABLED(r13)
-	 /* Check if things are setup the way we want _already_. */
-	cmpw	0,r3,r5
-	beqlr
-	/* are we enabling interrupts? */
-	cmpdi	0,r3,0
-	stb	r3,PACAPROCENABLED(r13)
-	beqlr
-	/* Check pending interrupts */
-	/*   A decrementer, IPI or PMC interrupt may have occurred
-	 *   while we were in the hypervisor (which enables) */
-	ld	r4,PACALPPACA+LPPACAANYINT(r13)
-	cmpdi	r4,0
-	beqlr
-
-	/*
-	 * Handle pending interrupts in interrupt context
-	 */
-	li	r0,0x5555
-	sc
-	blr
-#endif /* CONFIG_PPC_ISERIES */
 
 #ifdef CONFIG_IRQSTACKS
 _GLOBAL(call_do_softirq)
@@ -329,7 +291,7 @@
 
 /* Flush the dcache */
  	ld	r7,PPC64_CACHES@toc(r2)
-	clrrdi	r3,r3,12           	    /* Page align */
+	clrrdi	r3,r3,PAGE_SHIFT           	    /* Page align */
 	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
 	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
 	mr	r6,r3
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
index cc262a0..ec22321 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/ppc64/kernel/mpic.c
@@ -31,8 +31,7 @@
 #include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/machdep.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) printk(fmt)
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index d17f010..9e90d4113 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -46,7 +46,8 @@
 #include <asm/pSeries_reconfig.h>
 #include <asm/systemcfg.h>
 #include <asm/firmware.h>
-#include "pci.h"
+#include <asm/tce.h>
+#include <asm/ppc-pci.h>
 
 #define DBG(fmt...)
 
@@ -59,6 +60,9 @@
 	union tce_entry t;
 	union tce_entry *tp;
 
+	index <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	t.te_word = 0;
 	t.te_rdwr = 1; // Read allowed 
 
@@ -69,11 +73,11 @@
 
 	while (npages--) {
 		/* can't move this out since we might cross LMB boundary */
-		t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+		t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	
 		tp->te_word = t.te_word;
 
-		uaddr += PAGE_SIZE;
+		uaddr += TCE_PAGE_SIZE;
 		tp++;
 	}
 }
@@ -84,6 +88,9 @@
 	union tce_entry t;
 	union tce_entry *tp;
 
+	npages <<= TCE_PAGE_FACTOR;
+	index <<= TCE_PAGE_FACTOR;
+
 	t.te_word = 0;
 	tp  = ((union tce_entry *)tbl->it_base) + index;
 		
@@ -103,7 +110,7 @@
 	union tce_entry tce;
 
 	tce.te_word = 0;
-	tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
 	if (direction != DMA_TO_DEVICE)
 		tce.te_pciwr = 1;
@@ -136,6 +143,9 @@
 	union tce_entry tce, *tcep;
 	long l, limit;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	if (npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
@@ -155,7 +165,7 @@
 	}
 
 	tce.te_word = 0;
-	tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
+	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
 	if (direction != DMA_TO_DEVICE)
 		tce.te_pciwr = 1;
@@ -166,7 +176,7 @@
 		 * Set up the page with TCE data, looping through and setting
 		 * the values.
 		 */
-		limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry));
+		limit = min_t(long, npages, 4096/sizeof(union tce_entry));
 
 		for (l = 0; l < limit; l++) {
 			tcep[l] = tce;
@@ -196,6 +206,9 @@
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 
 	while (npages--) {
@@ -221,6 +234,9 @@
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 
 	rc = plpar_tce_stuff((u64)tbl->it_index,
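
The TCE_PAGE_FACTOR shifts added through pSeries_iommu.c convert between kernel pages and I/O (TCE) pages: a TCE always maps a fixed-size page (TCE_SHIFT worth, 4K here), so when the kernel page is larger each one consumes 2^factor consecutive TCE entries, and index/npages arguments counted in kernel pages must be scaled up. That is also why the limit computation switched from PAGE_SIZE to a literal 4096: the scratch page holding the batched TCEs is a 4K page regardless of kernel page size. A sketch of how such constants plausibly relate; the DEMO_* definitions are assumptions, not quotes from asm/tce.h:

	#include <asm/page.h>	/* for PAGE_SHIFT */

	/* Sketch: scaling kernel-page counts to 4K TCE entries.  With 4K
	 * kernel pages the factor is 0 and the shifts are no-ops; with
	 * 64K kernel pages each page needs 16 entries.
	 */
	#define DEMO_TCE_SHIFT		12
	#define DEMO_TCE_PAGE_SIZE	(1UL << DEMO_TCE_SHIFT)
	#define DEMO_TCE_PAGE_FACTOR	(PAGE_SHIFT - DEMO_TCE_SHIFT)

	static unsigned long demo_tce_entries(unsigned long kernel_pages)
	{
		return kernel_pages << DEMO_TCE_PAGE_FACTOR;
	}
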
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index a6de83f..268d836 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -486,8 +486,7 @@
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-				   int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
@@ -498,7 +497,7 @@
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
 	for (i = 0; i < number; i++)
-		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
 	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
index 1f5f141..2dd477e 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/ppc64/kernel/pSeries_pci.c
@@ -29,8 +29,7 @@
 
 #include <asm/pci-bridge.h>
 #include <asm/prom.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 static int __initdata s7a_workaround = -1;
 
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 3009701..5a9fe96 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -62,10 +62,10 @@
 #include <asm/xics.h>
 #include <asm/firmware.h>
 #include <asm/pmc.h>
+#include <asm/mpic.h>
+#include <asm/ppc-pci.h>
 
 #include "i8259.h"
-#include "mpic.h"
-#include "pci.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
index d2c7e2c..5d1ed85 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/ppc64/kernel/pSeries_smp.c
@@ -46,8 +46,8 @@
 #include <asm/rtas.h>
 #include <asm/plpar_wrappers.h>
 #include <asm/pSeries_reconfig.h>
+#include <asm/mpic.h>
 
-#include "mpic.h"
 #include "bpa_iic.h"
 
 #ifdef DEBUG
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/ppc64/kernel/pSeries_vio.c
index e0ae06f..866379b 100644
--- a/arch/ppc64/kernel/pSeries_vio.c
+++ b/arch/ppc64/kernel/pSeries_vio.c
@@ -22,6 +22,7 @@
 #include <asm/prom.h>
 #include <asm/vio.h>
 #include <asm/hvcall.h>
+#include <asm/tce.h>
 
 extern struct subsystem devices_subsys; /* needed for vio_find_name() */
 
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1d..feec06b 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
 #include <asm/irq.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index b8f7f58..57980a5 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/abs_addr.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
 				   dma_addr_t *dma_handle, unsigned int __nocast flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d..493bbe4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/pSeries_reconfig.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 /*
  * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index 14647e0..6c9dc67 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -37,7 +37,7 @@
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/iSeries_pci.h>
@@ -61,13 +61,7 @@
 	} else
 		pdev = to_pci_dev(dev);
 
-#ifdef CONFIG_PPC_ISERIES
-	return ISERIES_DEVNODE(pdev)->iommu_table;
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
 	return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 }
 
 
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
index eb4e6c3..26075f1 100644
--- a/arch/ppc64/kernel/pmac_feature.c
+++ b/arch/ppc64/kernel/pmac_feature.c
@@ -53,7 +53,7 @@
  * We use a single global lock to protect accesses. Each driver has
  * to take care of its own locking
  */
-static DEFINE_SPINLOCK(feature_lock  __pmacdata);
+static DEFINE_SPINLOCK(feature_lock);
 
 #define LOCK(flags)	spin_lock_irqsave(&feature_lock, flags);
 #define UNLOCK(flags)	spin_unlock_irqrestore(&feature_lock, flags);
@@ -62,9 +62,9 @@
 /*
  * Instance of some macio stuffs
  */
-struct macio_chip macio_chips[MAX_MACIO_CHIPS]  __pmacdata;
+struct macio_chip macio_chips[MAX_MACIO_CHIPS] ;
 
-struct macio_chip* __pmac macio_find(struct device_node* child, int type)
+struct macio_chip* macio_find(struct device_node* child, int type)
 {
 	while(child) {
 		int	i;
@@ -79,7 +79,7 @@
 }
 EXPORT_SYMBOL_GPL(macio_find);
 
-static const char* macio_names[] __pmacdata =
+static const char* macio_names[] =
 {
 	"Unknown",
 	"Grand Central",
@@ -106,9 +106,9 @@
 #define UN_BIS(r,v)	(UN_OUT((r), UN_IN(r) | (v)))
 #define UN_BIC(r,v)	(UN_OUT((r), UN_IN(r) & ~(v)))
 
-static struct device_node* uninorth_node __pmacdata;
-static u32* uninorth_base __pmacdata;
-static u32 uninorth_rev __pmacdata;
+static struct device_node* uninorth_node;
+static u32* uninorth_base;
+static u32 uninorth_rev;
 static void *u3_ht;
 
 extern struct device_node *k2_skiplist[2];
@@ -133,14 +133,14 @@
 	struct feature_table_entry* 	features;
 	unsigned long			board_flags;
 };
-static struct pmac_mb_def pmac_mb __pmacdata;
+static struct pmac_mb_def pmac_mb;
 
 /*
  * Here are the chip specific feature functions
  */
 
 
-static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
+static long g5_read_gpio(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
 
@@ -148,7 +148,7 @@
 }
 
 
-static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
+static long g5_write_gpio(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
 
@@ -156,7 +156,7 @@
 	return 0;
 }
 
-static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
+static long g5_gmac_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
 	unsigned long flags;
@@ -181,7 +181,7 @@
 	return 0;
 }
 
-static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
+static long g5_fw_enable(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
 	unsigned long flags;
@@ -206,7 +206,7 @@
 	return 0;
 }
 
-static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
+static long g5_mpic_enable(struct device_node* node, long param, long value)
 {
 	unsigned long flags;
 
@@ -220,7 +220,7 @@
 	return 0;
 }
 
-static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
+static long g5_eth_phy_reset(struct device_node* node, long param, long value)
 {
 	struct macio_chip* macio = &macio_chips[0];
 	struct device_node *phy;
@@ -250,7 +250,7 @@
 	return 0;
 }
 
-static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
+static long g5_i2s_enable(struct device_node *node, long param, long value)
 {
 	/* Very crude implementation for now */
 	struct macio_chip* macio = &macio_chips[0];
@@ -275,7 +275,7 @@
 
 
 #ifdef CONFIG_SMP
-static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
+static long g5_reset_cpu(struct device_node* node, long param, long value)
 {
 	unsigned int reset_io = 0;
 	unsigned long flags;
@@ -320,12 +320,12 @@
  * This takes the second CPU off the bus on dual CPU machines
  * running UP
  */
-void __pmac g5_phy_disable_cpu1(void)
+void g5_phy_disable_cpu1(void)
 {
 	UN_OUT(U3_API_PHY_CONFIG_1, 0);
 }
 
-static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
+static long generic_get_mb_info(struct device_node* node, long param, long value)
 {
 	switch(param) {
 		case PMAC_MB_INFO_MODEL:
@@ -347,14 +347,14 @@
 
 /* Used on any machine
  */
-static struct feature_table_entry any_features[]  __pmacdata = {
+static struct feature_table_entry any_features[] = {
 	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
 	{ 0, NULL }
 };
 
 /* G5 features
  */
-static struct feature_table_entry g5_features[]  __pmacdata = {
+static struct feature_table_entry g5_features[] = {
 	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
 	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
 	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
@@ -368,7 +368,7 @@
 	{ 0, NULL }
 };
 
-static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
+static struct pmac_mb_def pmac_mb_defs[] = {
 	{	"PowerMac7,2",			"PowerMac G5",
 		PMAC_TYPE_POWERMAC_G5,		g5_features,
 		0,
@@ -394,7 +394,7 @@
 /*
  * The toplevel feature_call callback
  */
-long __pmac pmac_do_feature_call(unsigned int selector, ...)
+long pmac_do_feature_call(unsigned int selector, ...)
 {
 	struct device_node* node;
 	long param, value;
@@ -706,8 +706,8 @@
  * Early video resume hook
  */
 
-static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
-static void *pmac_early_vresume_data __pmacdata;
+static void (*pmac_early_vresume_proc)(void *data);
+static void *pmac_early_vresume_data;
 
 void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
 {
@@ -725,11 +725,11 @@
  * AGP related suspend/resume code
  */
 
-static struct pci_dev *pmac_agp_bridge __pmacdata;
-static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
-static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
+static struct pci_dev *pmac_agp_bridge;
+static int (*pmac_agp_suspend)(struct pci_dev *bridge);
+static int (*pmac_agp_resume)(struct pci_dev *bridge);
 
-void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
+void pmac_register_agp_pm(struct pci_dev *bridge,
 				 int (*suspend)(struct pci_dev *bridge),
 				 int (*resume)(struct pci_dev *bridge))
 {
@@ -746,7 +746,7 @@
 }
 EXPORT_SYMBOL(pmac_register_agp_pm);
 
-void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
+void pmac_suspend_agp_for_card(struct pci_dev *dev)
 {
 	if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
 		return;
@@ -756,7 +756,7 @@
 }
 EXPORT_SYMBOL(pmac_suspend_agp_for_card);
 
-void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
+void pmac_resume_agp_for_card(struct pci_dev *dev)
 {
 	if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
 		return;
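
One subtlety in the first pmac_feature.c hunk: DEFINE_SPINLOCK(feature_lock __pmacdata) passed the section attribute in through the macro argument so that it would land inside the expanded variable definition, a deliberate trick that the marker removal makes unnecessary. A sketch of the expansion involved; the macro body shown is the idea, not a quote of that era's spinlock.h:

	typedef struct { int lock; } demo_spinlock_t;

	/* DEFINE_SPINLOCK(x) expands to a definition of variable 'x', so
	 * anything appended to the argument ends up in the declaration:
	 * DEMO_DEFINE_SPINLOCK(lock ATTR) -> demo_spinlock_t lock ATTR = ...
	 */
	#define DEMO_DEFINE_SPINLOCK(x)	demo_spinlock_t x = { 0 }

	DEMO_DEFINE_SPINLOCK(demo_feature_lock);	/* the plain form used now */
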
diff --git a/arch/ppc64/kernel/pmac_nvram.c b/arch/ppc64/kernel/pmac_nvram.c
index e32a902..11586d5 100644
--- a/arch/ppc64/kernel/pmac_nvram.c
+++ b/arch/ppc64/kernel/pmac_nvram.c
@@ -82,10 +82,10 @@
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);
 
-static char *nvram_image __pmacdata;
+static char *nvram_image;
 
 
-static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
+static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
 {
 	int i;
 
@@ -103,7 +103,7 @@
 	return count;
 }
 
-static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
+static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
 {
 	int i;
 
@@ -121,14 +121,14 @@
 	return count;
 }
 
-static ssize_t __pmac core99_nvram_size(void)
+static ssize_t core99_nvram_size(void)
 {
 	if (nvram_image == NULL)
 		return -ENODEV;
 	return NVRAM_SIZE;
 }
 
-static u8 __pmac chrp_checksum(struct chrp_header* hdr)
+static u8 chrp_checksum(struct chrp_header* hdr)
 {
 	u8 *ptr;
 	u16 sum = hdr->signature;
@@ -139,7 +139,7 @@
 	return sum;
 }
 
-static u32 __pmac core99_calc_adler(u8 *buffer)
+static u32 core99_calc_adler(u8 *buffer)
 {
 	int cnt;
 	u32 low, high;
@@ -161,7 +161,7 @@
 	return (high << 16) | low;
 }
 
-static u32 __pmac core99_check(u8* datas)
+static u32 core99_check(u8* datas)
 {
 	struct core99_header* hdr99 = (struct core99_header*)datas;
 
@@ -180,7 +180,7 @@
 	return hdr99->generation;
 }
 
-static int __pmac sm_erase_bank(int bank)
+static int sm_erase_bank(int bank)
 {
 	int stat, i;
 	unsigned long timeout;
@@ -212,7 +212,7 @@
 	return 0;
 }
 
-static int __pmac sm_write_bank(int bank, u8* datas)
+static int sm_write_bank(int bank, u8* datas)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -247,7 +247,7 @@
 	return 0;
 }
 
-static int __pmac amd_erase_bank(int bank)
+static int amd_erase_bank(int bank)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -294,7 +294,7 @@
 	return 0;
 }
 
-static int __pmac amd_write_bank(int bank, u8* datas)
+static int amd_write_bank(int bank, u8* datas)
 {
 	int i, stat = 0;
 	unsigned long timeout;
@@ -341,7 +341,7 @@
 }
 
 
-static int __pmac core99_nvram_sync(void)
+static int core99_nvram_sync(void)
 {
 	struct core99_header* hdr99;
 	unsigned long flags;
@@ -431,7 +431,7 @@
 	return 0;
 }
 
-int __pmac pmac_get_partition(int partition)
+int pmac_get_partition(int partition)
 {
 	struct nvram_partition *part;
 	const char *name;
@@ -459,7 +459,7 @@
 	return part->index;
 }
 
-u8 __pmac pmac_xpram_read(int xpaddr)
+u8 pmac_xpram_read(int xpaddr)
 {
 	int offset = pmac_get_partition(pmac_nvram_XPRAM);
 	loff_t index;
@@ -476,7 +476,7 @@
 	return buf;
 }
 
-void __pmac pmac_xpram_write(int xpaddr, u8 data)
+void pmac_xpram_write(int xpaddr, u8 data)
 {
 	int offset = pmac_get_partition(pmac_nvram_XPRAM);
 	loff_t index;
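
core99_calc_adler in the hunks above is, as its name suggests, an Adler-32 style checksum over the NVRAM image, consulted by core99_check when validating a bank. For reference, a minimal sketch of textbook Adler-32 (running low/high sums reduced mod 65521); the pmac routine's exact loop structure may differ:

	#include <stddef.h>
	#include <stdint.h>

	/* Sketch: Adler-32.  low sums the bytes, high sums the running
	 * low values; both are kept modulo 65521.
	 */
	static uint32_t demo_adler32(const uint8_t *buf, size_t len)
	{
		uint32_t low = 1, high = 0;
		size_t i;

		for (i = 0; i < len; i++) {
			low = (low + buf[i]) % 65521;
			high = (high + low) % 65521;
		}
		return (high << 16) | low;
	}
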
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
index dc40a0c..f139fc0 100644
--- a/arch/ppc64/kernel/pmac_pci.c
+++ b/arch/ppc64/kernel/pmac_pci.c
@@ -27,8 +27,8 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/iommu.h>
+#include <asm/ppc-pci.h>
 
-#include "pci.h"
 #include "pmac.h"
 
 #define DEBUG
@@ -121,7 +121,7 @@
 	|(((unsigned long)(off)) & 0xFCUL) \
 	|1UL)
 
-static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
+static unsigned long macrisc_cfg_access(struct pci_controller* hose,
 					       u8 bus, u8 dev_fn, u8 offset)
 {
 	unsigned int caddr;
@@ -142,7 +142,7 @@
 	return ((unsigned long)hose->cfg_data) + offset;
 }
 
-static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
+static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
 				      int offset, int len, u32 *val)
 {
 	struct pci_controller *hose;
@@ -173,7 +173,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
+static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
 				       int offset, int len, u32 val)
 {
 	struct pci_controller *hose;
@@ -265,7 +265,7 @@
 		+ (((unsigned long)bus) << 16) \
 		+ 0x01000000UL)
 
-static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
+static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
 					     u8 bus, u8 devfn, u8 offset)
 {
 	if (bus == hose->first_busno) {
@@ -277,7 +277,7 @@
 		return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
 }
 
-static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
+static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
 				    int offset, int len, u32 *val)
 {
 	struct pci_controller *hose;
@@ -327,7 +327,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
+static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
 				     int offset, int len, u32 val)
 {
 	struct pci_controller *hose;
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 2575525..497c3cd9 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -72,9 +72,9 @@
 #include <asm/lmb.h>
 #include <asm/smu.h>
 #include <asm/pmc.h>
+#include <asm/mpic.h>
 
 #include "pmac.h"
-#include "mpic.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -98,7 +98,7 @@
 
 extern void udbg_init_scc(struct device_node *np);
 
-static void __pmac pmac_show_cpuinfo(struct seq_file *m)
+static void pmac_show_cpuinfo(struct seq_file *m)
 {
 	struct device_node *np;
 	char *pp;
@@ -210,7 +210,7 @@
 late_initcall(pmac_late_init);
 
 /* can't be __init - can be called whenever a disk is first accessed */
-void __pmac note_bootable_part(dev_t dev, int part, int goodness)
+void note_bootable_part(dev_t dev, int part, int goodness)
 {
 	extern dev_t boot_dev;
 	char *p;
@@ -231,7 +231,7 @@
 	}
 }
 
-static void __pmac pmac_restart(char *cmd)
+static void pmac_restart(char *cmd)
 {
 	switch(sys_ctrler) {
 #ifdef CONFIG_ADB_PMU
@@ -250,7 +250,7 @@
 	}
 }
 
-static void __pmac pmac_power_off(void)
+static void pmac_power_off(void)
 {
 	switch(sys_ctrler) {
 #ifdef CONFIG_ADB_PMU
@@ -268,7 +268,7 @@
 	}
 }
 
-static void __pmac pmac_halt(void)
+static void pmac_halt(void)
 {
 	pmac_power_off();
 }
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
index a23de37..3a1683f 100644
--- a/arch/ppc64/kernel/pmac_smp.c
+++ b/arch/ppc64/kernel/pmac_smp.c
@@ -51,8 +51,7 @@
 #include <asm/cacheflush.h>
 #include <asm/keylargo.h>
 #include <asm/pmac_low_i2c.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -310,7 +309,7 @@
 	}
 }
 
-struct smp_ops_t core99_smp_ops __pmacdata = {
+struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_core99_probe,
 	.kick_cpu	= smp_core99_kick_cpu,
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
index 41bbb8c..9d8c97d 100644
--- a/arch/ppc64/kernel/pmac_time.c
+++ b/arch/ppc64/kernel/pmac_time.c
@@ -51,7 +51,7 @@
 extern struct timezone sys_tz;
 extern void to_tm(int tim, struct rtc_time * tm);
 
-void __pmac pmac_get_rtc_time(struct rtc_time *tm)
+void pmac_get_rtc_time(struct rtc_time *tm)
 {
 	switch(sys_ctrler) {
 #ifdef CONFIG_ADB_PMU
@@ -92,7 +92,7 @@
 	}
 }
 
-int __pmac pmac_set_rtc_time(struct rtc_time *tm)
+int pmac_set_rtc_time(struct rtc_time *tm)
 {
 	switch(sys_ctrler) {
 #ifdef CONFIG_ADB_PMU
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f..84006e2 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
 #include <asm/hw_irq.h>
 #include <asm/abs_addr.h>
 #include <asm/cacheflush.h>
-#include <asm/iSeries/HvCallSc.h>
 
 EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@
 
 EXPORT_SYMBOL(reloc_offset);
 
-#ifdef CONFIG_PPC_ISERIES
-EXPORT_SYMBOL(HvCall0);
-EXPORT_SYMBOL(HvCall1);
-EXPORT_SYMBOL(HvCall2);
-EXPORT_SYMBOL(HvCall3);
-EXPORT_SYMBOL(HvCall4);
-EXPORT_SYMBOL(HvCall5);
-EXPORT_SYMBOL(HvCall6);
-EXPORT_SYMBOL(HvCall7);
-#endif
-
 EXPORT_SYMBOL(_insb);
 EXPORT_SYMBOL(_outsb);
 EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC_ISERIES
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
-#endif
-
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4a9719b..20361bc 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -38,9 +38,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
 #include <asm/rtas.h>
-
-#include "mpic.h"
-#include "pci.h"
+#include <asm/mpic.h>
+#include <asm/ppc-pci.h>
 
 /* RTAS tokens */
 static int read_pci_config;
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc..88ae13f 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
 #include <asm/time.h>
 #include <asm/rtas.h>
 
-#include <asm/iSeries/mf.h>
 #include <asm/machdep.h>
 
-extern int piranha_simulator;
-
 /*
  *	We sponge a minor off of the misc major. No need to slurp
  *	up another valuable major dev number for this. If you add
@@ -265,40 +262,6 @@
         return len;
 }
 
-#ifdef CONFIG_PPC_ISERIES
-/*
- * Get the RTC from the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
-{
-	if (piranha_simulator)
-		return;
-
-	mf_get_rtc(rtc_tm);
-	rtc_tm->tm_mon--;
-}
-
-/*
- * Set the RTC in the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-int iSeries_set_rtc_time(struct rtc_time *tm)
-{
-	mf_set_rtc(tm);
-	return 0;
-}
-
-void iSeries_get_boot_time(struct rtc_time *tm)
-{
-	if ( piranha_simulator )
-		return;
-
-	mf_get_boot_rtc(tm);
-	tm->tm_mon  -= 1;
-}
-#endif
-
 #ifdef CONFIG_PPC_RTAS
 #define MAX_RTC_WAIT 5000	/* 5 sec */
 #define RTAS_CLOCK_BUSY (-2)
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 5ac48bd..776b55b 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -58,6 +58,7 @@
 #include <asm/mmu.h>
 #include <asm/lmb.h>
 #include <asm/iSeries/ItLpNaca.h>
+#include <asm/firmware.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -153,7 +154,7 @@
 	.orig_video_points = 16
 };
 
-#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 
 static int smt_enabled_cmdline;
 
@@ -306,15 +307,13 @@
 
 	systemcfg->processorCount = num_present_cpus();
 }
-#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */
-
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
 extern struct machdep_calls pmac_md;
 extern struct machdep_calls maple_md;
 extern struct machdep_calls bpa_md;
+extern struct machdep_calls iseries_md;
 
 /* Ultimately, stuff them in an elf section like initcalls... */
 static struct machdep_calls __initdata *machines[] = {
@@ -330,6 +329,9 @@
 #ifdef CONFIG_PPC_BPA
 	&bpa_md,
 #endif
+#ifdef CONFIG_PPC_ISERIES
+	&iseries_md,
+#endif
 	NULL
 };
 
@@ -401,7 +403,8 @@
 	/*
 	 * Initialize stab / SLB management
 	 */
-	stab_initialize(lpaca->stab_real);
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		stab_initialize(lpaca->stab_real);
 
 	/*
 	 * Initialize the MMU Hash table and create the linear mapping
@@ -532,8 +535,6 @@
 #endif /* CONFIG_BLK_DEV_INITRD */
 }
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /*
  * Do some initial setup of the system.  The parameters are those which 
  * were passed in from the bootloader.
@@ -542,14 +543,6 @@
 {
 	DBG(" -> setup_system()\n");
 
-#ifdef CONFIG_PPC_ISERIES
-	/* pSeries systems are identified in prom.c via OF. */
-	if (itLpNaca.xLparInstalled == 1)
-		systemcfg->platform = PLATFORM_ISERIES_LPAR;
-
-	ppc_md.init_early();
-#else /* CONFIG_PPC_ISERIES */
-
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
 	 */
@@ -607,9 +600,8 @@
 	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
 
 	parse_early_param();
-#endif /* !CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
+#ifdef CONFIG_SMP
 	/*
 	 * iSeries has already initialized the cpu maps at this point.
 	 */
@@ -619,7 +611,7 @@
 	 * we can map physical -> logical CPU ids
 	 */
 	smp_release_cpus();
-#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
+#endif
 
 	printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 793b562..6f4f3da 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -45,8 +45,7 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/abs_addr.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index e93c134..1cacf61 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -53,8 +53,7 @@
 #include <asm/time.h>
 #include <asm/mmu_context.h>
 #include <asm/systemcfg.h>
-
-#include "pci.h"
+#include <asm/ppc-pci.h>
 
 /* readdir & getdents */
 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 9939c20..7f63755 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -319,7 +319,7 @@
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
  */
-int timer_interrupt(struct pt_regs * regs)
+void timer_interrupt(struct pt_regs * regs)
 {
 	int next_dec;
 	unsigned long cur_tb;
@@ -377,8 +377,6 @@
 	}
 
 	irq_exit();
-
-	return 1;
 }
 
 /*
@@ -467,7 +465,7 @@
 
 EXPORT_SYMBOL(do_settimeofday);
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA) || defined(CONFIG_PPC_ISERIES)
 void __init generic_calibrate_decr(void)
 {
 	struct device_node *cpu;
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index 7467ae5..a728c9f 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -62,7 +62,7 @@
 EXPORT_SYMBOL(__debugger_fault_handler);
 #endif
 
-struct notifier_block *ppc64_die_chain;
+struct notifier_block *powerpc_die_chain;
 static DEFINE_SPINLOCK(die_notifier_lock);
 
 int register_die_notifier(struct notifier_block *nb)
@@ -71,7 +71,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&ppc64_die_chain, nb);
+	err = notifier_chain_register(&powerpc_die_chain, nb);
 	spin_unlock_irqrestore(&die_notifier_lock, flags);
 	return err;
 }
@@ -390,12 +390,12 @@
 		/* this is a WARN_ON rather than BUG/BUG_ON */
 		printk(KERN_ERR "Badness in %s at %s:%d\n",
 		       bug->function, bug->file,
-		      (unsigned int)bug->line & ~BUG_WARNING_TRAP);
+		       bug->line & ~BUG_WARNING_TRAP);
 		show_stack(current, (void *)regs->gpr[1]);
 		return 1;
 	}
 	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
-	       bug->function, bug->file, (unsigned int)bug->line);
+	       bug->function, bug->file, bug->line);
 	return 0;
 }
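
With the chain renamed to powerpc_die_chain it is shared by both word sizes, but the registration API is unchanged. A hedged example of a client, assuming the usual struct die_args payload from asm/kdebug.h:

#include <linux/notifier.h>

/* Illustrative subscriber only -- not part of this patch. */
static int sample_die_handler(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	/* 'data' carries the trap details (struct die_args) */
	return NOTIFY_DONE;
}

static struct notifier_block sample_die_nb = {
	.notifier_call = sample_die_handler,
};

static int __init sample_die_init(void)
{
	return register_die_notifier(&sample_die_nb);
}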
 
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/ppc64/kernel/u3_iommu.c
index 41ea09c..fba871a 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/ppc64/kernel/u3_iommu.c
@@ -44,39 +44,11 @@
 #include <asm/abs_addr.h>
 #include <asm/cacheflush.h>
 #include <asm/lmb.h>
-
-#include "pci.h"
+#include <asm/dart.h>
+#include <asm/ppc-pci.h>
 
 extern int iommu_force_on;
 
-/* physical base of DART registers */
-#define DART_BASE        0xf8033000UL
-
-/* Offset from base to control register */
-#define DARTCNTL   0
-/* Offset from base to exception register */
-#define DARTEXCP   0x10
-/* Offset from base to TLB tag registers */
-#define DARTTAG    0x1000
-
-
-/* Control Register fields */
-
-/* base address of table (pfn) */
-#define DARTCNTL_BASE_MASK    0xfffff
-#define DARTCNTL_BASE_SHIFT   12
-
-#define DARTCNTL_FLUSHTLB     0x400
-#define DARTCNTL_ENABLE       0x200
-
-/* size of table in pages */
-#define DARTCNTL_SIZE_MASK    0x1ff
-#define DARTCNTL_SIZE_SHIFT   0
-
-/* DART table fields */
-#define DARTMAP_VALID   0x80000000
-#define DARTMAP_RPNMASK 0x00ffffff
-
 /* Physical base address and size of the DART table */
 unsigned long dart_tablebase; /* exported to htab_initialize */
 static unsigned long dart_tablesize;
@@ -152,18 +124,21 @@
 
 	DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
 
+	index <<= DART_PAGE_FACTOR;
+	npages <<= DART_PAGE_FACTOR;
+
 	dp = ((unsigned int*)tbl->it_base) + index;
 	
 	/* On U3, all memory is contiguous, so we can move this
 	 * out of the loop.
 	 */
 	while (npages--) {
-		rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
+		rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
 
 		*(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
 
 		rpn++;
-		uaddr += PAGE_SIZE;
+		uaddr += DART_PAGE_SIZE;
 	}
 
 	dart_dirty = 1;
@@ -181,6 +156,9 @@
 
 	DBG("dart: free at: %lx, %lx\n", index, npages);
 
+	index <<= DART_PAGE_FACTOR;
+	npages <<= DART_PAGE_FACTOR;
+
 	dp  = ((unsigned int *)tbl->it_base) + index;
 		
 	while (npages--)
@@ -209,10 +187,10 @@
 	 * that to work around what looks like a problem with the HT bridge
 	 * prefetching into invalid pages and corrupting data
 	 */
-	tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
+	tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
 	if (!tmp)
 		panic("U3-DART: Cannot allocate spare page!");
-	dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK);
+	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
 
 	/* Map in DART registers. FIXME: Use device node to get base address */
 	dart = ioremap(DART_BASE, 0x7000);
@@ -223,8 +201,8 @@
 	 * table size and enable bit
 	 */
 	regword = DARTCNTL_ENABLE | 
-		((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
-		(((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
+		((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
+		(((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
 				 << DARTCNTL_SIZE_SHIFT);
 	dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
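
The u3_iommu rework keys all DART arithmetic to the DART's own fixed page size instead of the kernel's: one kernel page becomes 2^DART_PAGE_FACTOR consecutive DART entries. A sketch of the scaling, assuming the asm/dart.h constants this patch starts using (DART_PAGE_SHIFT, DART_PAGE_SIZE, DART_PAGE_FACTOR) plus the DARTMAP_* field masks carried over from the old local definitions:

/* Illustration only: fill DART entries for 'npages' kernel pages
 * starting at iommu slot 'index', backed by kernel virtual 'uaddr'.
 */
static void dart_fill_sketch(unsigned int *table, long index, long npages,
			     unsigned long uaddr)
{
	unsigned int *dp;
	unsigned long rpn;

	index <<= DART_PAGE_FACTOR;	/* kernel pages -> DART pages */
	npages <<= DART_PAGE_FACTOR;
	dp = table + index;

	while (npages--) {
		/* real page number at DART granularity, not PAGE_SHIFT */
		rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
		*dp++ = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
		uaddr += DART_PAGE_SIZE;
	}
}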
 
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644
index cb20762..0000000
--- a/arch/ppc64/kernel/vecemu.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Routines to emulate some Altivec/VMX instructions, specifically
- * those that can trap when given denormalized operands in Java mode.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-/* Functions in vector.S */
-extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vrefp(vector128 *dst, vector128 *src);
-extern void vrsqrtefp(vector128 *dst, vector128 *src);
-extern void vexptep(vector128 *dst, vector128 *src);
-
-static unsigned int exp2s[8] = {
-	0x800000,
-	0x8b95c2,
-	0x9837f0,
-	0xa5fed7,
-	0xb504f3,
-	0xc5672a,
-	0xd744fd,
-	0xeac0c7
-};
-
-/*
- * Computes an estimate of 2^x.  The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int eexp2(unsigned int s)
-{
-	int exp, pwr;
-	unsigned int mant, frac;
-
-	/* extract exponent field from input */
-	exp = ((s >> 23) & 0xff) - 127;
-	if (exp > 7) {
-		/* check for NaN input */
-		if (exp == 128 && (s & 0x7fffff) != 0)
-			return s | 0x400000;	/* return QNaN */
-		/* 2^-big = 0, 2^+big = +Inf */
-		return (s & 0x80000000)? 0: 0x7f800000;	/* 0 or +Inf */
-	}
-	if (exp < -23)
-		return 0x3f800000;	/* 1.0 */
-
-	/* convert to fixed point integer in 9.23 representation */
-	pwr = (s & 0x7fffff) | 0x800000;
-	if (exp > 0)
-		pwr <<= exp;
-	else
-		pwr >>= -exp;
-	if (s & 0x80000000)
-		pwr = -pwr;
-
-	/* extract integer part, which becomes exponent part of result */
-	exp = (pwr >> 23) + 126;
-	if (exp >= 254)
-		return 0x7f800000;
-	if (exp < -23)
-		return 0;
-
-	/* table lookup on top 3 bits of fraction to get mantissa */
-	mant = exp2s[(pwr >> 20) & 7];
-
-	/* linear interpolation using remaining 20 bits of fraction */
-	asm("mulhwu %0,%1,%2" : "=r" (frac)
-	    : "r" (pwr << 12), "r" (0x172b83ff));
-	asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
-	mant += frac;
-
-	if (exp >= 0)
-		return mant + (exp << 23);
-
-	/* denormalized result */
-	exp = -exp;
-	mant += 1 << (exp - 1);
-	return mant >> exp;
-}
-
-/*
- * Computes an estimate of log_2(x).  The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int elog2(unsigned int s)
-{
-	int exp, mant, lz, frac;
-
-	exp = s & 0x7f800000;
-	mant = s & 0x7fffff;
-	if (exp == 0x7f800000) {	/* Inf or NaN */
-		if (mant != 0)
-			s |= 0x400000;	/* turn NaN into QNaN */
-		return s;
-	}
-	if ((exp | mant) == 0)		/* +0 or -0 */
-		return 0xff800000;	/* return -Inf */
-
-	if (exp == 0) {
-		/* denormalized */
-		asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
-		mant <<= lz - 8;
-		exp = (-118 - lz) << 23;
-	} else {
-		mant |= 0x800000;
-		exp -= 127 << 23;
-	}
-
-	if (mant >= 0xb504f3) {				/* 2^0.5 * 2^23 */
-		exp |= 0x400000;			/* 0.5 * 2^23 */
-		asm("mulhwu %0,%1,%2" : "=r" (mant)
-		    : "r" (mant), "r" (0xb504f334));	/* 2^-0.5 * 2^32 */
-	}
-	if (mant >= 0x9837f0) {				/* 2^0.25 * 2^23 */
-		exp |= 0x200000;			/* 0.25 * 2^23 */
-		asm("mulhwu %0,%1,%2" : "=r" (mant)
-		    : "r" (mant), "r" (0xd744fccb));	/* 2^-0.25 * 2^32 */
-	}
-	if (mant >= 0x8b95c2) {				/* 2^0.125 * 2^23 */
-		exp |= 0x100000;			/* 0.125 * 2^23 */
-		asm("mulhwu %0,%1,%2" : "=r" (mant)
-		    : "r" (mant), "r" (0xeac0c6e8));	/* 2^-0.125 * 2^32 */
-	}
-	if (mant > 0x800000) {				/* 1.0 * 2^23 */
-		/* calculate (mant - 1) * 1.381097463 */
-		/* 1.381097463 == 0.125 / (2^0.125 - 1) */
-		asm("mulhwu %0,%1,%2" : "=r" (frac)
-		    : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
-		exp += frac;
-	}
-	s = exp & 0x80000000;
-	if (exp != 0) {
-		if (s)
-			exp = -exp;
-		asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
-		lz = 8 - lz;
-		if (lz > 0)
-			exp >>= lz;
-		else if (lz < 0)
-			exp <<= -lz;
-		s += ((lz + 126) << 23) + exp;
-	}
-	return s;
-}
-
-#define VSCR_SAT	1
-
-static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
-{
-	int exp, mant;
-
-	exp = (x >> 23) & 0xff;
-	mant = x & 0x7fffff;
-	if (exp == 255 && mant != 0)
-		return 0;		/* NaN -> 0 */
-	exp = exp - 127 + scale;
-	if (exp < 0)
-		return 0;		/* round towards zero */
-	if (exp >= 31) {
-		/* saturate, unless the result would be -2^31 */
-		if (x + (scale << 23) != 0xcf000000)
-			*vscrp |= VSCR_SAT;
-		return (x & 0x80000000)? 0x80000000: 0x7fffffff;
-	}
-	mant |= 0x800000;
-	mant = (mant << 7) >> (30 - exp);
-	return (x & 0x80000000)? -mant: mant;
-}
-
-static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
-{
-	int exp;
-	unsigned int mant;
-
-	exp = (x >> 23) & 0xff;
-	mant = x & 0x7fffff;
-	if (exp == 255 && mant != 0)
-		return 0;		/* NaN -> 0 */
-	exp = exp - 127 + scale;
-	if (exp < 0)
-		return 0;		/* round towards zero */
-	if (x & 0x80000000) {
-		/* negative => saturate to 0 */
-		*vscrp |= VSCR_SAT;
-		return 0;
-	}
-	if (exp >= 32) {
-		/* saturate */
-		*vscrp |= VSCR_SAT;
-		return 0xffffffff;
-	}
-	mant |= 0x800000;
-	mant = (mant << 8) >> (31 - exp);
-	return mant;
-}
-
-/* Round to floating integer, towards 0 */
-static unsigned int rfiz(unsigned int x)
-{
-	int exp;
-
-	exp = ((x >> 23) & 0xff) - 127;
-	if (exp == 128 && (x & 0x7fffff) != 0)
-		return x | 0x400000;	/* NaN -> make it a QNaN */
-	if (exp >= 23)
-		return x;		/* it's an integer already (or Inf) */
-	if (exp < 0)
-		return x & 0x80000000;	/* |x| < 1.0 rounds to 0 */
-	return x & ~(0x7fffff >> exp);
-}
-
-/* Round to floating integer, towards +/- Inf */
-static unsigned int rfii(unsigned int x)
-{
-	int exp, mask;
-
-	exp = ((x >> 23) & 0xff) - 127;
-	if (exp == 128 && (x & 0x7fffff) != 0)
-		return x | 0x400000;	/* NaN -> make it a QNaN */
-	if (exp >= 23)
-		return x;		/* it's an integer already (or Inf) */
-	if ((x & 0x7fffffff) == 0)
-		return x;		/* +/-0 -> +/-0 */
-	if (exp < 0)
-		/* 0 < |x| < 1.0 rounds to +/- 1.0 */
-		return (x & 0x80000000) | 0x3f800000;
-	mask = 0x7fffff >> exp;
-	/* mantissa overflows into exponent - that's OK,
-	   it can't overflow into the sign bit */
-	return (x + mask) & ~mask;
-}
-
-/* Round to floating integer, to nearest */
-static unsigned int rfin(unsigned int x)
-{
-	int exp, half;
-
-	exp = ((x >> 23) & 0xff) - 127;
-	if (exp == 128 && (x & 0x7fffff) != 0)
-		return x | 0x400000;	/* NaN -> make it a QNaN */
-	if (exp >= 23)
-		return x;		/* it's an integer already (or Inf) */
-	if (exp < -1)
-		return x & 0x80000000;	/* |x| < 0.5 -> +/-0 */
-	if (exp == -1)
-		/* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
-		return (x & 0x80000000) | 0x3f800000;
-	half = 0x400000 >> exp;
-	/* add 0.5 to the magnitude and chop off the fraction bits */
-	return (x + half) & ~(0x7fffff >> exp);
-}
-
-int
-emulate_altivec(struct pt_regs *regs)
-{
-	unsigned int instr, i;
-	unsigned int va, vb, vc, vd;
-	vector128 *vrs;
-
-	if (get_user(instr, (unsigned int __user *) regs->nip))
-		return -EFAULT;
-	if ((instr >> 26) != 4)
-		return -EINVAL;		/* not an altivec instruction */
-	vd = (instr >> 21) & 0x1f;
-	va = (instr >> 16) & 0x1f;
-	vb = (instr >> 11) & 0x1f;
-	vc = (instr >> 6) & 0x1f;
-
-	vrs = current->thread.vr;
-	switch (instr & 0x3f) {
-	case 10:
-		switch (vc) {
-		case 0:	/* vaddfp */
-			vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
-			break;
-		case 1:	/* vsubfp */
-			vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
-			break;
-		case 4:	/* vrefp */
-			vrefp(&vrs[vd], &vrs[vb]);
-			break;
-		case 5:	/* vrsqrtefp */
-			vrsqrtefp(&vrs[vd], &vrs[vb]);
-			break;
-		case 6:	/* vexptefp */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
-			break;
-		case 7:	/* vlogefp */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = elog2(vrs[vb].u[i]);
-			break;
-		case 8:		/* vrfin */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = rfin(vrs[vb].u[i]);
-			break;
-		case 9:		/* vrfiz */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
-			break;
-		case 10:	/* vrfip */
-			for (i = 0; i < 4; ++i) {
-				u32 x = vrs[vb].u[i];
-				x = (x & 0x80000000)? rfiz(x): rfii(x);
-				vrs[vd].u[i] = x;
-			}
-			break;
-		case 11:	/* vrfim */
-			for (i = 0; i < 4; ++i) {
-				u32 x = vrs[vb].u[i];
-				x = (x & 0x80000000)? rfii(x): rfiz(x);
-				vrs[vd].u[i] = x;
-			}
-			break;
-		case 14:	/* vctuxs */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
-						&current->thread.vscr.u[3]);
-			break;
-		case 15:	/* vctsxs */
-			for (i = 0; i < 4; ++i)
-				vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
-						&current->thread.vscr.u[3]);
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-	case 46:	/* vmaddfp */
-		vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
-		break;
-	case 47:	/* vnmsubfp */
-		vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
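
Before it leaves arch/ppc64, the eexp2() estimate above deserves a worked example: the argument is converted to 9.23 fixed point, the integer part becomes the result exponent, the top three fraction bits index exp2s[] (mantissas of 2^(i/8)), and the remaining bits drive a linear interpolation. A stand-alone sketch of the same scheme, using 64-bit multiplies in place of the mulhwu inline asm and ignoring the NaN/overflow/denormal paths:

#include <stdio.h>
#include <stdint.h>

static const uint32_t exp2s[8] = {
	0x800000, 0x8b95c2, 0x9837f0, 0xa5fed7,
	0xb504f3, 0xc5672a, 0xd744fd, 0xeac0c7,
};

static float est_exp2(float x)
{
	int32_t pwr = (int32_t)(x * (1 << 23));	  /* 9.23 fixed point */
	int exp = (pwr >> 23) + 126;		  /* integer part */
	uint32_t mant = exp2s[(pwr >> 20) & 7];	  /* top 3 fraction bits */
	uint32_t frac = ((uint64_t)((uint32_t)pwr << 12) * 0x172b83ffu) >> 32;
	union { uint32_t u; float f; } r;

	mant += (uint32_t)(((uint64_t)frac * mant) >> 32); /* interpolate */
	r.u = mant + ((uint32_t)exp << 23);
	return r.f;
}

int main(void)
{
	printf("2^1.5 ~= %f\n", est_exp2(1.5f));  /* about 2.8284 */
	return 0;
}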
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 0306510..f34d514 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
+#include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
 
 OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@
 	LOCK_TEXT
 	KPROBES_TEXT
 	*(.fixup)
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_etext = .;
 	}
 
@@ -43,7 +44,7 @@
 
 
   /* will be freed after init */
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   __init_begin = .;
 
   .init.text : {
@@ -83,7 +84,7 @@
 
   SECURITY_INIT
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   .init.ramfs : {
 	__initramfs_start = .;
 	*(.init.ramfs)
@@ -96,18 +97,21 @@
 	__per_cpu_end = .;
 	}
 
+  . = ALIGN(PAGE_SIZE);
   . = ALIGN(16384);
   __init_end = .;
   /* freed after init ends here */
 
 
   /* Read/write sections */
+  . = ALIGN(PAGE_SIZE);
   . = ALIGN(16384);
   /* The initial task and kernel stack */
   .data.init_task : {
 	*(.data.init_task)
 	}
 
+  . = ALIGN(PAGE_SIZE);
   .data.page_aligned : {
 	*(.data.page_aligned)
 	}
@@ -129,18 +133,18 @@
 	__toc_start = .;
 	*(.got)
 	*(.toc)
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_edata = .;
 	}
 
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   .bss : {
 	__bss_start = .;
 	*(.bss)
 	__bss_stop = .;
 	}
 
-  . = ALIGN(4096);
+  . = ALIGN(PAGE_SIZE);
   _end = . ;
 }
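
Pulling asm/page.h into the linker script lets every ALIGN() track the configured page size rather than a hard-coded 4096, which matters once the kernel page can be larger than 4K. For that to preprocess cleanly in a .lds.S file the header just needs the usual __ASSEMBLY__-aware shape; roughly (an assumed sketch, not the literal header):

#define PAGE_SHIFT	12		/* larger for big-page configs */
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(1 << PAGE_SHIFT)	/* no UL suffix for ld */
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif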
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index bfd385b..174d145 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -335,10 +335,9 @@
 	local_irq_restore(flags);
 }
 
-static void native_flush_hash_range(unsigned long context,
-				    unsigned long number, int local)
+static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
+	unsigned long va, vpn, hash, secondary, slot, flags, avpn;
 	int i, j;
 	hpte_t *hptep;
 	unsigned long hpte_v;
@@ -349,13 +348,7 @@
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if (batch->addr[i] < KERNELBASE)
-			vsid = get_vsid(context, batch->addr[i]);
-		else
-			vsid = get_kernel_vsid(batch->addr[i]);
-
-		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
-		batch->vaddr[j] = va;
+		va = batch->vaddr[j];
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 09475c8..8350743 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -90,7 +90,6 @@
 		;
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static inline void create_pte_mapping(unsigned long start, unsigned long end,
 				      unsigned long mode, int large)
 {
@@ -111,7 +110,7 @@
 		unsigned long vpn, hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(addr);
 		unsigned long va = (vsid << 28) | (addr & 0xfffffff);
-		int ret;
+		int ret = -1;
 
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
@@ -129,16 +128,25 @@
 
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+#ifdef CONFIG_PPC_ISERIES
+		if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
+			ret = iSeries_hpte_bolt_or_insert(hpteg, va,
+				virt_to_abs(addr) >> PAGE_SHIFT,
+				vflags, tmp_mode);
+		else
+#endif
 #ifdef CONFIG_PPC_PSERIES
 		if (systemcfg->platform & PLATFORM_LPAR)
 			ret = pSeries_lpar_hpte_insert(hpteg, va,
 				virt_to_abs(addr) >> PAGE_SHIFT,
 				vflags, tmp_mode);
 		else
-#endif /* CONFIG_PPC_PSERIES */
+#endif
+#ifdef CONFIG_PPC_MULTIPLATFORM
 			ret = native_hpte_insert(hpteg, va,
 				virt_to_abs(addr) >> PAGE_SHIFT,
 				vflags, tmp_mode);
+#endif
 
 		if (ret == -1) {
 			ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -261,7 +269,6 @@
 }
 #undef KB
 #undef MB
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
 /*
  * Called by asm hashtable.S for doing lazy icache flush
@@ -355,18 +362,11 @@
 	return ret;
 }
 
-void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-		     int local)
+void flush_hash_page(unsigned long va, pte_t pte, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot;
+	unsigned long vpn, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if (ea < KERNELBASE)
-		vsid = get_vsid(context, ea);
-	else
-		vsid = get_kernel_vsid(ea);
-
-	va = (vsid << 28) | (ea & 0x0fffffff);
 	if (huge)
 		vpn = va >> HPAGE_SHIFT;
 	else
@@ -381,17 +381,17 @@
 	ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
-void flush_hash_range(unsigned long context, unsigned long number, int local)
+void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range) {
-		ppc_md.flush_hash_range(context, number, local);
+		ppc_md.flush_hash_range(number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+		struct ppc64_tlb_batch *batch =
+			&__get_cpu_var(ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
-			flush_hash_page(context, batch->addr[i], batch->pte[i],
-					local);
+			flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 	}
 }
 
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 21fbffb..09ab81a 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -128,12 +128,10 @@
 void hpte_update(struct mm_struct *mm, unsigned long addr,
 		 unsigned long pte, int wrprot)
 {
-	int i;
-	unsigned long context = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	unsigned long vsid;
+	int i;
 
-	if (REGION_ID(addr) == USER_REGION_ID)
-		context = mm->context.id;
 	i = batch->index;
 
 	/*
@@ -143,19 +141,21 @@
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
 	 */
-	if (i != 0 && (context != batch->context ||
-		       batch->large != pte_huge(pte))) {
+	if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
 		flush_tlb_pending();
 		i = 0;
 	}
-
 	if (i == 0) {
-		batch->context = context;
 		batch->mm = mm;
 		batch->large = pte_huge(pte);
 	}
+	if (addr < KERNELBASE) {
+		vsid = get_vsid(mm->context.id, addr);
+		WARN_ON(vsid == 0);
+	} else
+		vsid = get_kernel_vsid(addr);
+	batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
 	batch->pte[i] = __pte(pte);
-	batch->addr[i] = addr;
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		flush_tlb_pending();
@@ -177,10 +177,9 @@
 		local = 1;
 
 	if (i == 1)
-		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
-				local);
+		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
 	else
-		flush_hash_range(batch->context, i, local);
+		flush_hash_range(i, local);
 	batch->index = 0;
 	put_cpu();
 }
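
The batch rework above moves the vsid lookup to hpte_update() time, while the mm is in hand, so the flush side never needs (context, ea) pairs again; it just consumes precomputed virtual addresses. The producer half, sketched:

/* Sketch of the producer side: compute the full VA once, when the
 * PTE changes, and stash it in the per-cpu batch.
 */
static void batch_add_sketch(struct ppc64_tlb_batch *batch,
			     struct mm_struct *mm,
			     unsigned long addr, pte_t pte)
{
	unsigned long vsid;
	int i = batch->index;

	if (addr < KERNELBASE)
		vsid = get_vsid(mm->context.id, addr);
	else
		vsid = get_kernel_vsid(addr);

	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = pte;
	batch->index = i + 1;
	if (batch->index >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}

flush_hash_page() and both flush_hash_range() implementations then drop the segment math from their loops entirely.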
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644
index 5ade198..0000000
--- a/arch/ppc64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-
-menu "Profiling support"
-	depends on EXPERIMENTAL
-
-config PROFILING
-	bool "Profiling support (EXPERIMENTAL)"
-	help
-	  Say Y here to enable the extended profiling support mechanisms used
-	  by profilers such as OProfile.
-	  
-
-config OPROFILE
-	tristate "OProfile system profiling (EXPERIMENTAL)"
-	depends on PROFILING
-	help
-	  OProfile is a profiling system capable of profiling the
-	  whole system, include the kernel, kernel modules, libraries,
-	  and applications.
-
-	  If unsure, say N.
-
-endmenu
-
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644
index 162dbf0..0000000
--- a/arch/ppc64/oprofile/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
-		oprof.o cpu_buffer.o buffer_sync.o \
-		event_buffer.o oprofile_files.o \
-		oprofilefs.o oprofile_stats.o \
-		timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 87d1f8a..d8c3d8e 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -81,7 +81,7 @@
 	
 } pmac_ide_hwif_t;
 
-static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata;
+static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
 static int pmac_ide_count;
 
 enum {
@@ -242,7 +242,7 @@
 	int	cycleTime;
 };
 
-struct mdma_timings_t mdma_timings_33[] __pmacdata =
+struct mdma_timings_t mdma_timings_33[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -255,7 +255,7 @@
     {   0,   0,   0 }
 };
 
-struct mdma_timings_t mdma_timings_33k[] __pmacdata =
+struct mdma_timings_t mdma_timings_33k[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -268,7 +268,7 @@
     {   0,   0,   0 }
 };
 
-struct mdma_timings_t mdma_timings_66[] __pmacdata =
+struct mdma_timings_t mdma_timings_66[] =
 {
     { 240, 240, 480 },
     { 180, 180, 360 },
@@ -286,7 +286,7 @@
 	int	addrSetup; /* ??? */
 	int	rdy2pause;
 	int	wrDataSetup;
-} kl66_udma_timings[] __pmacdata =
+} kl66_udma_timings[] =
 {
     {   0, 180,  120 },	/* Mode 0 */
     {   0, 150,  90 },	/*      1 */
@@ -301,7 +301,7 @@
 	u32	timing_reg;
 };
 
-static struct kauai_timing	kauai_pio_timings[] __pmacdata =
+static struct kauai_timing	kauai_pio_timings[] =
 {
 	{ 930	, 0x08000fff },
 	{ 600	, 0x08000a92 },
@@ -316,7 +316,7 @@
 	{ 120	, 0x04000148 }
 };
 
-static struct kauai_timing	kauai_mdma_timings[] __pmacdata =
+static struct kauai_timing	kauai_mdma_timings[] =
 {
 	{ 1260	, 0x00fff000 },
 	{ 480	, 0x00618000 },
@@ -330,7 +330,7 @@
 	{ 0	, 0 },
 };
 
-static struct kauai_timing	kauai_udma_timings[] __pmacdata =
+static struct kauai_timing	kauai_udma_timings[] =
 {
 	{ 120	, 0x000070c0 },
 	{ 90	, 0x00005d80 },
@@ -341,7 +341,7 @@
 	{ 0	, 0 },
 };
 
-static struct kauai_timing	shasta_pio_timings[] __pmacdata =
+static struct kauai_timing	shasta_pio_timings[] =
 {
 	{ 930	, 0x08000fff },
 	{ 600	, 0x0A000c97 },
@@ -356,7 +356,7 @@
 	{ 120	, 0x0400010a }
 };
 
-static struct kauai_timing	shasta_mdma_timings[] __pmacdata =
+static struct kauai_timing	shasta_mdma_timings[] =
 {
 	{ 1260	, 0x00fff000 },
 	{ 480	, 0x00820800 },
@@ -370,7 +370,7 @@
 	{ 0	, 0 },
 };
 
-static struct kauai_timing	shasta_udma133_timings[] __pmacdata =
+static struct kauai_timing	shasta_udma133_timings[] =
 {
 	{ 120   , 0x00035901, },
 	{ 90    , 0x000348b1, },
@@ -522,7 +522,7 @@
  * N.B. this can't be an initfunc, because the media-bay task can
  * call ide_[un]register at any time.
  */
-void __pmac
+void
 pmac_ide_init_hwif_ports(hw_regs_t *hw,
 			      unsigned long data_port, unsigned long ctrl_port,
 			      int *irq)
@@ -559,7 +559,7 @@
  * timing register when selecting that unit. This version is for
  * ASICs with a single timing register
  */
-static void __pmac
+static void
 pmac_ide_selectproc(ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -579,7 +579,7 @@
  * timing register when selecting that unit. This version is for
  * ASICs with a dual timing register (Kauai)
  */
-static void __pmac
+static void
 pmac_ide_kauai_selectproc(ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -600,7 +600,7 @@
 /*
  * Force an update of controller timing values for a given drive
  */
-static void __pmac
+static void
 pmac_ide_do_update_timings(ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -633,7 +633,7 @@
  * to sort that out sooner or later and see if I can finally get the
  * common version to work properly in all cases
  */
-static int __pmac
+static int
 pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
 {
 	ide_hwif_t *hwif = HWIF(drive);
@@ -710,7 +710,7 @@
 /*
  * Old tuning functions (called on hdparm -p), sets up drive PIO timings
  */
-static void __pmac
+static void
 pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
 {
 	ide_pio_data_t d;
@@ -801,7 +801,7 @@
 /*
  * Calculate KeyLargo ATA/66 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_ata4(u32 *timings, u8 speed)
 {
 	unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
@@ -829,7 +829,7 @@
 /*
  * Calculate Kauai ATA/100 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 {
 	struct ide_timing *t = ide_timing_find_mode(speed);
@@ -849,7 +849,7 @@
 /*
  * Calculate Shasta ATA/133 UDMA timings
  */
-static int __pmac
+static int
 set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
 {
 	struct ide_timing *t = ide_timing_find_mode(speed);
@@ -869,7 +869,7 @@
 /*
  * Calculate MDMA timings for all cells
  */
-static int __pmac
+static int
 set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
 			u8 speed, int drive_cycle_time)
 {
@@ -1014,7 +1014,7 @@
  * our dedicated function is more precise as it uses the drive provided
  * cycle time value. We should probably fix this one to deal with that too...
  */
-static int __pmac
+static int
 pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
 {
 	int unit = (drive->select.b.unit & 0x01);
@@ -1092,7 +1092,7 @@
  * Blast some well known "safe" values to the timing registers at init or
  * wakeup from sleep time, before we do real calculation
  */
-static void __pmac
+static void
 sanitize_timings(pmac_ide_hwif_t *pmif)
 {
 	unsigned int value, value2 = 0;
@@ -1123,13 +1123,13 @@
 	pmif->timings[2] = pmif->timings[3] = value2;
 }
 
-unsigned long __pmac
+unsigned long
 pmac_ide_get_base(int index)
 {
 	return pmac_ide[index].regbase;
 }
 
-int __pmac
+int
 pmac_ide_check_base(unsigned long base)
 {
 	int ix;
@@ -1140,7 +1140,7 @@
 	return -1;
 }
 
-int __pmac
+int
 pmac_ide_get_irq(unsigned long base)
 {
 	int ix;
@@ -1151,7 +1151,7 @@
 	return 0;
 }
 
-static int ide_majors[]  __pmacdata = { 3, 22, 33, 34, 56, 57 };
+static int ide_majors[] = { 3, 22, 33, 34, 56, 57 };
 
 dev_t __init
 pmac_find_ide_boot(char *bootdevice, int n)
@@ -1701,7 +1701,7 @@
  * pmac_ide_build_dmatable builds the DBDMA command list
  * for a transfer and sets the DBDMA channel to point to it.
  */
-static int __pmac
+static int
 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
 	struct dbdma_cmd *table;
@@ -1785,7 +1785,7 @@
 }
 
 /* Teardown mappings after DMA has completed.  */
-static void __pmac
+static void
 pmac_ide_destroy_dmatable (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -1802,7 +1802,7 @@
 /*
  * Pick up best MDMA timing for the drive and apply it
  */
-static int __pmac
+static int
 pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
 {
 	ide_hwif_t *hwif = HWIF(drive);
@@ -1859,7 +1859,7 @@
 /*
  * Pick up best UDMA timing for the drive and apply it
  */
-static int __pmac
+static int
 pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
 {
 	ide_hwif_t *hwif = HWIF(drive);
@@ -1915,7 +1915,7 @@
  * Check what is the best DMA timing setting for the drive and
  * call appropriate functions to apply it.
  */
-static int __pmac
+static int
 pmac_ide_dma_check(ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
@@ -1967,7 +1967,7 @@
  * Prepare a DMA transfer. We build the DMA table, adjust the timings for
  * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
  */
-static int __pmac
+static int
 pmac_ide_dma_setup(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = HWIF(drive);
@@ -1997,7 +1997,7 @@
 	return 0;
 }
 
-static void __pmac
+static void
 pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
 	/* issue cmd to drive */
@@ -2008,7 +2008,7 @@
  * Kick the DMA controller into life after the DMA command has been issued
  * to the drive.
  */
-static void __pmac
+static void
 pmac_ide_dma_start(ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2024,7 +2024,7 @@
 /*
  * After a DMA transfer, make sure the controller is stopped
  */
-static int __pmac
+static int
 pmac_ide_dma_end (ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2052,7 +2052,7 @@
  * that's not implemented yet), on the other hand, we don't have shared interrupts
  * so it's not really a problem
  */
-static int __pmac
+static int
 pmac_ide_dma_test_irq (ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
@@ -2108,19 +2108,19 @@
 	return 1;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_host_off (ide_drive_t *drive)
 {
 	return 0;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_host_on (ide_drive_t *drive)
 {
 	return 0;
 }
 
-static int __pmac
+static int
 pmac_ide_dma_lostirq (ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 5e0811d..2b8a6e8 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -27,7 +27,7 @@
 
 #undef DEBUG
 
-static void __pmac
+static void
 anslcd_write_byte_ctrl ( unsigned char c )
 {
 #ifdef DEBUG
@@ -43,14 +43,14 @@
 	}
 }
 
-static void __pmac
+static void
 anslcd_write_byte_data ( unsigned char c )
 {
 	out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
 	udelay(anslcd_short_delay);
 }
 
-static ssize_t __pmac
+static ssize_t
 anslcd_write( struct file * file, const char __user * buf, 
 				size_t count, loff_t *ppos )
 {
@@ -73,7 +73,7 @@
 	return p - buf;
 }
 
-static int __pmac
+static int
 anslcd_ioctl( struct inode * inode, struct file * file,
 				unsigned int cmd, unsigned long arg )
 {
@@ -115,7 +115,7 @@
 	}
 }
 
-static int __pmac
+static int
 anslcd_open( struct inode * inode, struct file * file )
 {
 	return 0;
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index c0712a1..b856bb6 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -167,19 +167,19 @@
  * Functions for polling content of media bay
  */
  
-static u8 __pmac
+static u8
 ohare_mb_content(struct media_bay_info *bay)
 {
 	return (MB_IN32(bay, OHARE_MBCR) >> 12) & 7;
 }
 
-static u8 __pmac
+static u8
 heathrow_mb_content(struct media_bay_info *bay)
 {
 	return (MB_IN32(bay, HEATHROW_MBCR) >> 12) & 7;
 }
 
-static u8 __pmac
+static u8
 keylargo_mb_content(struct media_bay_info *bay)
 {
 	int new_gpio;
@@ -205,7 +205,7 @@
  * into reset state as well
  */
 
-static void __pmac
+static void
 ohare_mb_power(struct media_bay_info* bay, int on_off)
 {
 	if (on_off) {
@@ -224,7 +224,7 @@
 	MB_BIC(bay, OHARE_MBCR, 0x00000F00);
 }
 
-static void __pmac
+static void
 heathrow_mb_power(struct media_bay_info* bay, int on_off)
 {
 	if (on_off) {
@@ -243,7 +243,7 @@
 	MB_BIC(bay, HEATHROW_MBCR, 0x00000F00);
 }
 
-static void __pmac
+static void
 keylargo_mb_power(struct media_bay_info* bay, int on_off)
 {
 	if (on_off) {
@@ -267,7 +267,7 @@
  * enable the related busses
  */
 
-static int __pmac
+static int
 ohare_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
 	switch(device_id) {
@@ -287,7 +287,7 @@
 	return -ENODEV;
 }
 
-static int __pmac
+static int
 heathrow_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
 	switch(device_id) {
@@ -307,7 +307,7 @@
 	return -ENODEV;
 }
 
-static int __pmac
+static int
 keylargo_mb_setup_bus(struct media_bay_info* bay, u8 device_id)
 {
 	switch(device_id) {
@@ -330,43 +330,43 @@
  * Functions for tweaking resets
  */
 
-static void __pmac
+static void
 ohare_mb_un_reset(struct media_bay_info* bay)
 {
 	MB_BIS(bay, OHARE_FCR, OH_BAY_RESET_N);
 }
 
-static void __pmac keylargo_mb_init(struct media_bay_info *bay)
+static void keylargo_mb_init(struct media_bay_info *bay)
 {
 	MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_ENABLE);
 }
 
-static void __pmac heathrow_mb_un_reset(struct media_bay_info* bay)
+static void heathrow_mb_un_reset(struct media_bay_info* bay)
 {
 	MB_BIS(bay, HEATHROW_FCR, HRW_BAY_RESET_N);
 }
 
-static void __pmac keylargo_mb_un_reset(struct media_bay_info* bay)
+static void keylargo_mb_un_reset(struct media_bay_info* bay)
 {
 	MB_BIS(bay, KEYLARGO_MBCR, KL_MBCR_MB0_DEV_RESET);
 }
 
-static void __pmac ohare_mb_un_reset_ide(struct media_bay_info* bay)
+static void ohare_mb_un_reset_ide(struct media_bay_info* bay)
 {
 	MB_BIS(bay, OHARE_FCR, OH_IDE1_RESET_N);
 }
 
-static void __pmac heathrow_mb_un_reset_ide(struct media_bay_info* bay)
+static void heathrow_mb_un_reset_ide(struct media_bay_info* bay)
 {
 	MB_BIS(bay, HEATHROW_FCR, HRW_IDE1_RESET_N);
 }
 
-static void __pmac keylargo_mb_un_reset_ide(struct media_bay_info* bay)
+static void keylargo_mb_un_reset_ide(struct media_bay_info* bay)
 {
 	MB_BIS(bay, KEYLARGO_FCR1, KL1_EIDE0_RESET_N);
 }
 
-static inline void __pmac set_mb_power(struct media_bay_info* bay, int onoff)
+static inline void set_mb_power(struct media_bay_info* bay, int onoff)
 {
 	/* Power up and assert the bay reset line */
 	if (onoff) {
@@ -382,7 +382,7 @@
 	bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
 }
 
-static void __pmac poll_media_bay(struct media_bay_info* bay)
+static void poll_media_bay(struct media_bay_info* bay)
 {
 	int id = bay->ops->content(bay);
 
@@ -415,7 +415,7 @@
 	}
 }
 
-int __pmac check_media_bay(struct device_node *which_bay, int what)
+int check_media_bay(struct device_node *which_bay, int what)
 {
 #ifdef CONFIG_BLK_DEV_IDE
 	int	i;
@@ -432,7 +432,7 @@
 }
 EXPORT_SYMBOL(check_media_bay);
 
-int __pmac check_media_bay_by_base(unsigned long base, int what)
+int check_media_bay_by_base(unsigned long base, int what)
 {
 #ifdef CONFIG_BLK_DEV_IDE
 	int	i;
@@ -449,7 +449,7 @@
 	return -ENODEV;
 }
 
-int __pmac media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
+int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
 	int irq, int index)
 {
 #ifdef CONFIG_BLK_DEV_IDE
@@ -489,7 +489,7 @@
 	return -ENODEV;
 }
 
-static void __pmac media_bay_step(int i)
+static void media_bay_step(int i)
 {
 	struct media_bay_info* bay = &media_bays[i];
 
@@ -619,7 +619,7 @@
  * with the IDE driver.  It needs to be a thread because
  * ide_register can't be called from interrupt context.
  */
-static int __pmac media_bay_task(void *x)
+static int media_bay_task(void *x)
 {
 	int	i;
 
@@ -704,7 +704,7 @@
 
 }
 
-static int __pmac media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
+static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
 {
 	struct media_bay_info	*bay = macio_get_drvdata(mdev);
 
@@ -719,7 +719,7 @@
 	return 0;
 }
 
-static int __pmac media_bay_resume(struct macio_dev *mdev)
+static int media_bay_resume(struct macio_dev *mdev)
 {
 	struct media_bay_info	*bay = macio_get_drvdata(mdev);
 
@@ -760,7 +760,7 @@
 
 /* Definitions of "ops" structures.
  */
-static struct mb_ops ohare_mb_ops __pmacdata = {
+static struct mb_ops ohare_mb_ops = {
 	.name		= "Ohare",
 	.content	= ohare_mb_content,
 	.power		= ohare_mb_power,
@@ -769,7 +769,7 @@
 	.un_reset_ide	= ohare_mb_un_reset_ide,
 };
 
-static struct mb_ops heathrow_mb_ops __pmacdata = {
+static struct mb_ops heathrow_mb_ops = {
 	.name		= "Heathrow",
 	.content	= heathrow_mb_content,
 	.power		= heathrow_mb_power,
@@ -778,7 +778,7 @@
 	.un_reset_ide	= heathrow_mb_un_reset_ide,
 };
 
-static struct mb_ops keylargo_mb_ops __pmacdata = {
+static struct mb_ops keylargo_mb_ops = {
 	.name		= "KeyLargo",
 	.init		= keylargo_mb_init,
 	.content	= keylargo_mb_content,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index a85ac18..5f28352 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1092,7 +1092,7 @@
 }
 
 
-static struct file_operations smu_device_fops __pmacdata = {
+static struct file_operations smu_device_fops = {
 	.llseek		= no_llseek,
 	.read		= smu_read,
 	.write		= smu_write,
@@ -1101,7 +1101,7 @@
 	.release	= smu_release,
 };
 
-static struct miscdevice pmu_device __pmacdata = {
+static struct miscdevice pmu_device = {
 	MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
 };
 
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 417deb5..d843a6c 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -37,7 +37,6 @@
 
 #ifdef CONFIG_MAC
 #define CUDA_IRQ IRQ_MAC_ADB
-#define __openfirmware
 #define eieio()
 #else
 #define CUDA_IRQ vias->intrs[0].line
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 645a2e5..5034618 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -244,7 +244,7 @@
  * - the number of response bytes which the PMU will return, or
  *   -1 if it will send a length byte.
  */
-static const s8 pmu_data_len[256][2] __openfirmwaredata = {
+static const s8 pmu_data_len[256][2] = {
 /*	   0	   1	   2	   3	   4	   5	   6	   7  */
 /*00*/	{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
 /*08*/	{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
@@ -295,7 +295,7 @@
 };
 #endif /* CONFIG_PMAC_BACKLIGHT */
 
-int __openfirmware
+int
 find_via_pmu(void)
 {
 	if (via != 0)
@@ -374,7 +374,7 @@
 }
 
 #ifdef CONFIG_ADB
-static int __openfirmware
+static int
 pmu_probe(void)
 {
 	return vias == NULL? -ENODEV: 0;
@@ -405,7 +405,7 @@
 	bright_req_2.complete = 1;
 	batt_req.complete = 1;
 
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
 	if (pmu_kind == PMU_KEYLARGO_BASED)
 		openpic_set_irq_priority(vias->intrs[0].line,
 					 OPENPIC_PRIORITY_DEFAULT + 1);
@@ -520,7 +520,7 @@
 
 device_initcall(via_pmu_dev_init);
 
-static int __openfirmware
+static int
 init_pmu(void)
 {
 	int timeout;
@@ -625,7 +625,7 @@
 /* This new version of the code for 2400/3400/3500 powerbooks
  * is inspired from the implementation in gkrellm-pmu
  */
-static void __pmac
+static void
 done_battery_state_ohare(struct adb_request* req)
 {
 	/* format:
@@ -713,7 +713,7 @@
 	clear_bit(0, &async_req_locks);
 }
 
-static void __pmac
+static void
 done_battery_state_smart(struct adb_request* req)
 {
 	/* format:
@@ -791,7 +791,7 @@
 	clear_bit(0, &async_req_locks);
 }
 
-static void __pmac
+static void
 query_battery_state(void)
 {
 	if (test_and_set_bit(0, &async_req_locks))
@@ -804,7 +804,7 @@
 			2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
 }
 
-static int __pmac
+static int
 proc_get_info(char *page, char **start, off_t off,
 		int count, int *eof, void *data)
 {
@@ -819,7 +819,7 @@
 	return p - page;
 }
 
-static int __pmac
+static int
 proc_get_irqstats(char *page, char **start, off_t off,
 		  int count, int *eof, void *data)
 {
@@ -846,7 +846,7 @@
 	return p - page;
 }
 
-static int __pmac
+static int
 proc_get_batt(char *page, char **start, off_t off,
 		int count, int *eof, void *data)
 {
@@ -870,7 +870,7 @@
 	return p - page;
 }
 
-static int __pmac
+static int
 proc_read_options(char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
@@ -887,7 +887,7 @@
 	return p - page;
 }
 			
-static int __pmac
+static int
 proc_write_options(struct file *file, const char __user *buffer,
 			unsigned long count, void *data)
 {
@@ -934,7 +934,7 @@
 
 #ifdef CONFIG_ADB
 /* Send an ADB command */
-static int __pmac
+static int
 pmu_send_request(struct adb_request *req, int sync)
 {
 	int i, ret;
@@ -1014,7 +1014,7 @@
 }
 
 /* Enable/disable autopolling */
-static int __pmac
+static int
 pmu_adb_autopoll(int devs)
 {
 	struct adb_request req;
@@ -1037,7 +1037,7 @@
 }
 
 /* Reset the ADB bus */
-static int __pmac
+static int
 pmu_adb_reset_bus(void)
 {
 	struct adb_request req;
@@ -1072,7 +1072,7 @@
 #endif /* CONFIG_ADB */
 
 /* Construct and send a pmu request */
-int __openfirmware
+int
 pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
 	    int nbytes, ...)
 {
@@ -1098,7 +1098,7 @@
 	return pmu_queue_request(req);
 }
 
-int __pmac
+int
 pmu_queue_request(struct adb_request *req)
 {
 	unsigned long flags;
@@ -1190,7 +1190,7 @@
 		(*done)(req);
 }
 
-static void __pmac
+static void
 pmu_start(void)
 {
 	struct adb_request *req;
@@ -1214,7 +1214,7 @@
 	send_byte(req->data[0]);
 }
 
-void __openfirmware
+void
 pmu_poll(void)
 {
 	if (!via)
@@ -1224,7 +1224,7 @@
 	via_pmu_interrupt(0, NULL, NULL);
 }
 
-void __openfirmware
+void
 pmu_poll_adb(void)
 {
 	if (!via)
@@ -1239,7 +1239,7 @@
 		|| req_awaiting_reply));
 }
 
-void __openfirmware
+void
 pmu_wait_complete(struct adb_request *req)
 {
 	if (!via)
@@ -1253,7 +1253,7 @@
  * This is done to avoid spurious shutdowns when we know we'll have
  * interrupts switched off for a long time
  */
-void __openfirmware
+void
 pmu_suspend(void)
 {
 	unsigned long flags;
@@ -1293,7 +1293,7 @@
 	} while (1);
 }
 
-void __openfirmware
+void
 pmu_resume(void)
 {
 	unsigned long flags;
@@ -1323,7 +1323,7 @@
 }
 
 /* Interrupt data could be the result data from an ADB cmd */
-static void __pmac
+static void
 pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
 {
 	unsigned char ints, pirq;
@@ -1435,7 +1435,7 @@
 	goto next;
 }
 
-static struct adb_request* __pmac
+static struct adb_request*
 pmu_sr_intr(struct pt_regs *regs)
 {
 	struct adb_request *req;
@@ -1541,7 +1541,7 @@
 	return NULL;
 }
 
-static irqreturn_t __pmac
+static irqreturn_t
 via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -1629,7 +1629,7 @@
 	return IRQ_RETVAL(handled);
 }
 
-void __pmac
+void
 pmu_unlock(void)
 {
 	unsigned long flags;
@@ -1642,7 +1642,7 @@
 }
 
 
-static irqreturn_t __pmac
+static irqreturn_t
 gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -1663,12 +1663,12 @@
 }
 
 #ifdef CONFIG_PMAC_BACKLIGHT
-static int backlight_to_bright[] __pmacdata = {
+static int backlight_to_bright[] = {
 	0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e,
 	0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e
 };
  
-static int __openfirmware
+static int
 pmu_set_backlight_enable(int on, int level, void* data)
 {
 	struct adb_request req;
@@ -1688,7 +1688,7 @@
 	return 0;
 }
 
-static void __openfirmware
+static void
 pmu_bright_complete(struct adb_request *req)
 {
 	if (req == &bright_req_1)
@@ -1697,7 +1697,7 @@
 		clear_bit(2, &async_req_locks);
 }
 
-static int __openfirmware
+static int
 pmu_set_backlight_level(int level, void* data)
 {
 	if (vias == NULL)
@@ -1717,7 +1717,7 @@
 }
 #endif /* CONFIG_PMAC_BACKLIGHT */
 
-void __pmac
+void
 pmu_enable_irled(int on)
 {
 	struct adb_request req;
@@ -1732,7 +1732,7 @@
 	pmu_wait_complete(&req);
 }
 
-void __pmac
+void
 pmu_restart(void)
 {
 	struct adb_request req;
@@ -1757,7 +1757,7 @@
 		;
 }
 
-void __pmac
+void
 pmu_shutdown(void)
 {
 	struct adb_request req;
@@ -2076,7 +2076,7 @@
 }
 
 /* Sleep is broadcast last-to-first */
-static int __pmac
+static int
 broadcast_sleep(int when, int fallback)
 {
 	int ret = PBOOK_SLEEP_OK;
@@ -2101,7 +2101,7 @@
 }
 
 /* Wake is broadcast first-to-last */
-static int __pmac
+static int
 broadcast_wake(void)
 {
 	int ret = PBOOK_SLEEP_OK;
@@ -2132,7 +2132,7 @@
 } *pbook_pci_saves;
 static int pbook_npci_saves;
 
-static void __pmac
+static void
 pbook_alloc_pci_save(void)
 {
 	int npci;
@@ -2149,7 +2149,7 @@
 	pbook_npci_saves = npci;
 }
 
-static void __pmac
+static void
 pbook_free_pci_save(void)
 {
 	if (pbook_pci_saves == NULL)
@@ -2159,7 +2159,7 @@
 	pbook_npci_saves = 0;
 }
 
-static void __pmac
+static void
 pbook_pci_save(void)
 {
 	struct pci_save *ps = pbook_pci_saves;
@@ -2190,7 +2190,7 @@
  * during boot, it will be in the pci dev list. If it's disabled at this point
  * (and it will probably be), then you can't access its config space.
  */
-static void __pmac
+static void
 pbook_pci_restore(void)
 {
 	u16 cmd;
@@ -2238,7 +2238,7 @@
 
 #ifdef DEBUG_SLEEP
 /* N.B. This doesn't work on the 3400 */
-void  __pmac
+void 
 pmu_blink(int n)
 {
 	struct adb_request req;
@@ -2277,9 +2277,9 @@
  * Put the powerbook to sleep.
  */
  
-static u32 save_via[8] __pmacdata;
+static u32 save_via[8];
 
-static void __pmac
+static void
 save_via_state(void)
 {
 	save_via[0] = in_8(&via[ANH]);
@@ -2291,7 +2291,7 @@
 	save_via[6] = in_8(&via[T1CL]);
 	save_via[7] = in_8(&via[T1CH]);
 }
-static void __pmac
+static void
 restore_via_state(void)
 {
 	out_8(&via[ANH], save_via[0]);
@@ -2307,7 +2307,7 @@
 	out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
 }
 
-static int __pmac
+static int
 pmac_suspend_devices(void)
 {
 	int ret;
@@ -2397,7 +2397,7 @@
 	return 0;
 }
 
-static int __pmac
+static int
 pmac_wakeup_devices(void)
 {
 	mdelay(100);
@@ -2436,7 +2436,7 @@
 #define	GRACKLE_NAP	(1<<4)
 #define	GRACKLE_SLEEP	(1<<3)
 
-int __pmac
+int
 powerbook_sleep_grackle(void)
 {
 	unsigned long save_l2cr;
@@ -2520,7 +2520,7 @@
 	return 0;
 }
 
-static int __pmac
+static int
 powerbook_sleep_Core99(void)
 {
 	unsigned long save_l2cr;
@@ -2620,7 +2620,7 @@
 #define PB3400_MEM_CTRL		0xf8000000
 #define PB3400_MEM_CTRL_SLEEP	0x70
 
-static int __pmac
+static int
 powerbook_sleep_3400(void)
 {
 	int ret, i, x;
@@ -2720,9 +2720,9 @@
 };
 
 static LIST_HEAD(all_pmu_pvt);
-static DEFINE_SPINLOCK(all_pvt_lock __pmacdata);
+static DEFINE_SPINLOCK(all_pvt_lock);
 
-static void __pmac
+static void
 pmu_pass_intr(unsigned char *data, int len)
 {
 	struct pmu_private *pp;
@@ -2751,7 +2751,7 @@
 	spin_unlock_irqrestore(&all_pvt_lock, flags);
 }
 
-static int __pmac
+static int
 pmu_open(struct inode *inode, struct file *file)
 {
 	struct pmu_private *pp;
@@ -2773,7 +2773,7 @@
 	return 0;
 }
 
-static ssize_t  __pmac
+static ssize_t 
 pmu_read(struct file *file, char __user *buf,
 			size_t count, loff_t *ppos)
 {
@@ -2825,14 +2825,14 @@
 	return ret;
 }
 
-static ssize_t __pmac
+static ssize_t
 pmu_write(struct file *file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
 	return 0;
 }
 
-static unsigned int __pmac
+static unsigned int
 pmu_fpoll(struct file *filp, poll_table *wait)
 {
 	struct pmu_private *pp = filp->private_data;
@@ -2849,7 +2849,7 @@
 	return mask;
 }
 
-static int __pmac
+static int
 pmu_release(struct inode *inode, struct file *file)
 {
 	struct pmu_private *pp = file->private_data;
@@ -2874,8 +2874,7 @@
 	return 0;
 }
 
-/* Note: removed __openfirmware here since it causes link errors */
-static int __pmac
+static int
 pmu_ioctl(struct inode * inode, struct file *filp,
 		     u_int cmd, u_long arg)
 {
@@ -2957,7 +2956,7 @@
 	return error;
 }
 
-static struct file_operations pmu_device_fops __pmacdata = {
+static struct file_operations pmu_device_fops = {
 	.read		= pmu_read,
 	.write		= pmu_write,
 	.poll		= pmu_fpoll,
@@ -2966,7 +2965,7 @@
 	.release	= pmu_release,
 };
 
-static struct miscdevice pmu_device __pmacdata = {
+static struct miscdevice pmu_device = {
 	PMU_MINOR, "pmu", &pmu_device_fops
 };
 
@@ -2982,7 +2981,7 @@
 
 
 #ifdef DEBUG_SLEEP
-static inline void  __pmac
+static inline void 
 polled_handshake(volatile unsigned char __iomem *via)
 {
 	via[B] &= ~TREQ; eieio();
@@ -2993,7 +2992,7 @@
 		;
 }
 
-static inline void  __pmac
+static inline void 
 polled_send_byte(volatile unsigned char __iomem *via, int x)
 {
 	via[ACR] |= SR_OUT | SR_EXT; eieio();
@@ -3001,7 +3000,7 @@
 	polled_handshake(via);
 }
 
-static inline int __pmac
+static inline int
 polled_recv_byte(volatile unsigned char __iomem *via)
 {
 	int x;
@@ -3013,7 +3012,7 @@
 	return x;
 }
 
-int __pmac
+int
 pmu_polled_request(struct adb_request *req)
 {
 	unsigned long flags;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index 820dc52..6f80d76 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -835,7 +835,7 @@
 } *pbook_pci_saves;
 static int n_pbook_pci_saves;
 
-static inline void __openfirmware
+static inline void
 pbook_pci_save(void)
 {
 	int npci;
@@ -863,7 +863,7 @@
 	}
 }
 
-static inline void __openfirmware
+static inline void
 pbook_pci_restore(void)
 {
 	u16 cmd;
@@ -902,7 +902,7 @@
 #define IRQ_ENABLE	((unsigned int *)0xf3000024)
 #define MEM_CTRL	((unsigned int *)0xf8000070)
 
-int __openfirmware powerbook_sleep(void)
+int powerbook_sleep(void)
 {
 	int ret, i, x;
 	static int save_backlight;
@@ -1001,25 +1001,24 @@
 /*
  * Support for /dev/pmu device
  */
-static int __openfirmware pmu_open(struct inode *inode, struct file *file)
+static int pmu_open(struct inode *inode, struct file *file)
 {
 	return 0;
 }
 
-static ssize_t __openfirmware pmu_read(struct file *file, char *buf,
+static ssize_t pmu_read(struct file *file, char *buf,
 			size_t count, loff_t *ppos)
 {
 	return 0;
 }
 
-static ssize_t __openfirmware pmu_write(struct file *file, const char *buf,
+static ssize_t pmu_write(struct file *file, const char *buf,
 			 size_t count, loff_t *ppos)
 {
 	return 0;
 }
 
-/* Note: removed __openfirmware here since it causes link errors */
-static int /*__openfirmware*/ pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct inode * inode, struct file *filp,
 		     u_int cmd, u_long arg)
 {
 	int error;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index a345355..5b6b0b6 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -629,12 +629,4 @@
 	if (entry)
 		entry->proc_fops = &proc_sysrq_trigger_operations;
 #endif
-#ifdef CONFIG_PPC32
-	{
-		extern struct file_operations ppc_htab_operations;
-		entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL);
-		if (entry)
-			entry->proc_fops = &ppc_htab_operations;
-	}
-#endif
 }
diff --git a/include/asm-ppc64/a.out.h b/include/asm-powerpc/a.out.h
similarity index 69%
rename from include/asm-ppc64/a.out.h
rename to include/asm-powerpc/a.out.h
index 3871e25..c7393a9 100644
--- a/include/asm-ppc64/a.out.h
+++ b/include/asm-powerpc/a.out.h
@@ -1,14 +1,5 @@
-#ifndef __PPC64_A_OUT_H__
-#define __PPC64_A_OUT_H__
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
+#ifndef _ASM_POWERPC_A_OUT_H
+#define _ASM_POWERPC_A_OUT_H
 
 struct exec
 {
@@ -27,6 +18,7 @@
 #define N_SYMSIZE(a)	((a).a_syms)
 
 #ifdef __KERNEL__
+#ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
@@ -34,6 +26,11 @@
 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
 		   STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#else /* __powerpc64__ */
+
+#define STACK_TOP TASK_SIZE
+
+#endif /* __powerpc64__ */
 #endif /* __KERNEL__ */
 
-#endif /* __PPC64_A_OUT_H__ */
+#endif /* _ASM_POWERPC_A_OUT_H */
diff --git a/include/asm-ppc/atomic.h b/include/asm-powerpc/atomic.h
similarity index 85%
rename from include/asm-ppc/atomic.h
rename to include/asm-powerpc/atomic.h
index eeafd50..ed4b345 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
 /*
  * PowerPC atomic operations
  */
 
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
-
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <asm/synch.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
+#define ATOMIC_INIT(i)		{ (i) }
 
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		(((v)->counter) = (i))
 
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-
-#ifdef CONFIG_SMP
-#define SMP_SYNC	"sync"
-#define SMP_ISYNC	"\n\tisync"
-#else
-#define SMP_SYNC	""
-#define SMP_ISYNC
-#endif
-
 /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
  * The old ATOMIC_SYNC_FIX covered some but not all of this.
  */
@@ -53,12 +44,13 @@
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -88,12 +80,13 @@
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -121,12 +114,13 @@
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -164,12 +158,13 @@
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -189,13 +184,14 @@
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -204,11 +200,10 @@
 	return t;
 }
 
-#define __MB	__asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec()	__MB
-#define smp_mb__after_atomic_dec()	__MB
-#define smp_mb__before_atomic_inc()	__MB
-#define smp_mb__after_atomic_inc()	__MB
+#define smp_mb__before_atomic_dec()     smp_mb()
+#define smp_mb__after_atomic_dec()      smp_mb()
+#define smp_mb__before_atomic_inc()     smp_mb()
+#define smp_mb__after_atomic_inc()      smp_mb()
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_PPC_ATOMIC_H_ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
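
For reference (a sketch, not part of this patch): the EIEIO_ON_SMP/ISYNC_ON_SMP pair from <asm/synch.h> gives the value-returning atomics full barrier semantics on SMP, replacing the old SMP_SYNC/SMP_ISYNC strings. A minimal consumer of the unchanged API looks like this; the counter name is illustrative:

#include <asm/atomic.h>

static atomic_t example_users = ATOMIC_INIT(0);	/* illustrative counter */

static int example_get(void)
{
	/* lwarx/stwcx. loop inside; retries until the store succeeds */
	return atomic_inc_return(&example_users);
}

static void example_put(void)
{
	smp_mb__before_atomic_dec();	/* now a plain smp_mb() */
	atomic_dec(&example_users);
}
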
diff --git a/include/asm-ppc64/auxvec.h b/include/asm-powerpc/auxvec.h
similarity index 82%
rename from include/asm-ppc64/auxvec.h
rename to include/asm-powerpc/auxvec.h
index ac6381a..79d8c47 100644
--- a/include/asm-ppc64/auxvec.h
+++ b/include/asm-powerpc/auxvec.h
@@ -1,5 +1,5 @@
-#ifndef __PPC64_AUXVEC_H
-#define __PPC64_AUXVEC_H
+#ifndef _ASM_POWERPC_AUXVEC_H
+#define _ASM_POWERPC_AUXVEC_H
 
 /*
  * We need to put in some extra aux table entries to tell glibc what
@@ -14,6 +14,8 @@
 /* The vDSO location. We have to use the same value as x86 for glibc's
  * sake :-)
  */
+#ifdef __powerpc64__
 #define AT_SYSINFO_EHDR		33
+#endif
 
-#endif /* __PPC64_AUXVEC_H */
+#endif
diff --git a/include/asm-ppc64/bug.h b/include/asm-powerpc/bug.h
similarity index 62%
rename from include/asm-ppc64/bug.h
rename to include/asm-powerpc/bug.h
index 1601782..e4d028e 100644
--- a/include/asm-ppc64/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_BUG_H
-#define _PPC64_BUG_H
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
 
 /*
  * Define an illegal instr to trap on the bug.
@@ -11,9 +11,21 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef __powerpc64__
+#define BUG_TABLE_ENTRY(label, line, file, func) \
+	".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n"
+#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n"
+#define DATA_TYPE long long
+#else
+#define BUG_TABLE_ENTRY(label, line, file, func) \
+	".long " #label ", " #line ", " #file ", " #func "\n"
+#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n"
+#define DATA_TYPE int
+#endif /* __powerpc64__ */
+
 struct bug_entry {
 	unsigned long	bug_addr;
-	long		line;
+	int		line;
 	const char	*file;
 	const char	*function;
 };
@@ -32,28 +44,28 @@
 	__asm__ __volatile__(						 \
 		"1:	twi 31,0,0\n"					 \
 		".section __bug_table,\"a\"\n\t"			 \
-		"	.llong 1b,%0,%1,%2\n"				 \
+		BUG_TABLE_ENTRY(1b,%0,%1,%2)				 \
 		".previous"						 \
 		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
 
 #define BUG_ON(x) do {						\
 	__asm__ __volatile__(					\
-		"1:	tdnei %0,0\n"				\
+		TRAP_OP(%0,0)					\
 		".section __bug_table,\"a\"\n\t"		\
-		"	.llong 1b,%1,%2,%3\n"			\
+		BUG_TABLE_ENTRY(1b,%1,%2,%3)			\
 		".previous"					\
-		: : "r" ((long long)(x)), "i" (__LINE__),	\
+		: : "r" ((DATA_TYPE)(x)), "i" (__LINE__),	\
 		    "i" (__FILE__), "i" (__FUNCTION__));	\
 } while (0)
 
 #define WARN_ON(x) do {						\
 	__asm__ __volatile__(					\
-		"1:	tdnei %0,0\n"				\
+		TRAP_OP(%0,0)					\
 		".section __bug_table,\"a\"\n\t"		\
-		"	.llong 1b,%1,%2,%3\n"			\
+		BUG_TABLE_ENTRY(1b,%1,%2,%3)			\
 		".previous"					\
-		: : "r" ((long long)(x)),			\
+		: : "r" ((DATA_TYPE)(x)),			\
 		    "i" (__LINE__ + BUG_WARNING_TRAP),		\
 		    "i" (__FILE__), "i" (__FUNCTION__));	\
 } while (0)
@@ -61,9 +73,9 @@
 #define HAVE_ARCH_BUG
 #define HAVE_ARCH_BUG_ON
 #define HAVE_ARCH_WARN_ON
-#endif
-#endif
+#endif /* CONFIG_BUG */
+#endif /* __ASSEMBLY__ */
 
 #include <asm-generic/bug.h>
 
-#endif
+#endif /* _ASM_POWERPC_BUG_H */
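
For reference (a sketch, not part of this patch): with the merged header, BUG_ON() emits twnei on 32-bit and tdnei on 64-bit via TRAP_OP, and BUG_TABLE_ENTRY sizes the __bug_table record to match. Usage is unchanged; the function below is illustrative:

#include <asm/bug.h>

static void example_check(void *p)
{
	/* Traps on failure; the trap handler resolves file/line from
	 * the __bug_table section rather than from inline strings. */
	BUG_ON(p == NULL);
}
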
diff --git a/include/asm-ppc64/byteorder.h b/include/asm-powerpc/byteorder.h
similarity index 90%
rename from include/asm-ppc64/byteorder.h
rename to include/asm-powerpc/byteorder.h
index 8b57da6..b377522 100644
--- a/include/asm-ppc64/byteorder.h
+++ b/include/asm-powerpc/byteorder.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_BYTEORDER_H
-#define _PPC64_BYTEORDER_H
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@
 
 #ifndef __STRICT_ANSI__
 #define __BYTEORDER_HAS_U64__
-#endif
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
 #include <linux/byteorder/big_endian.h>
 
-#endif /* _PPC64_BYTEORDER_H */
+#endif /* _ASM_POWERPC_BYTEORDER_H */
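
For reference (a sketch, not part of this patch): __SWAB_64_THRU_32__ tells <linux/byteorder/swab.h> to synthesize 64-bit swaps from two 32-bit swaps on 32-bit kernels. Since PowerPC is big-endian, any cpu_to_le64() really swaps; the function name below is illustrative:

#include <linux/types.h>
#include <asm/byteorder.h>

static __le64 example_pack(u64 addr)
{
	/* on ppc32 this now compiles to two 32-bit byte swaps */
	return cpu_to_le64(addr);
}
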
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
new file mode 100644
index 0000000..1e50efa
--- /dev/null
+++ b/include/asm-powerpc/cputable.h
@@ -0,0 +1,426 @@
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h> /* for ASM_CONST */
+
+#define PPC_FEATURE_32			0x80000000
+#define PPC_FEATURE_64			0x40000000
+#define PPC_FEATURE_601_INSTR		0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC		0x10000000
+#define PPC_FEATURE_HAS_FPU		0x08000000
+#define PPC_FEATURE_HAS_MMU		0x04000000
+#define PPC_FEATURE_HAS_4xxMAC		0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE	0x01000000
+#define PPC_FEATURE_HAS_SPE		0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE	0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE	0x00200000
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/* This structure can grow; its real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+struct op_powerpc_model;
+
+typedef	void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+
+struct cpu_spec {
+	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
+	unsigned int	pvr_mask;
+	unsigned int	pvr_value;
+
+	char		*cpu_name;
+	unsigned long	cpu_features;		/* Kernel features */
+	unsigned int	cpu_user_features;	/* Userland features */
+
+	/* cache line sizes */
+	unsigned int	icache_bsize;
+	unsigned int	dcache_bsize;
+
+	/* number of performance monitor counters */
+	unsigned int	num_pmcs;
+
+	/* this is called to initialize various CPU bits like L1 cache,
+	 * BHT, SPD, etc... from head.S before branching to identify_machine
+	 */
+	cpu_setup_t	cpu_setup;
+
+	/* Used by oprofile userspace to select the right counters */
+	char		*oprofile_cpu_type;
+
+	/* Processor specific oprofile operations */
+	struct op_powerpc_model *oprofile_model;
+};
+
+extern struct cpu_spec		*cur_cpu_spec;
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Retain the 32-bit definitions; they all use the bottom half of the word */
+#define CPU_FTR_SPLIT_ID_CACHE		ASM_CONST(0x0000000000000001)
+#define CPU_FTR_L2CR			ASM_CONST(0x0000000000000002)
+#define CPU_FTR_SPEC7450		ASM_CONST(0x0000000000000004)
+#define CPU_FTR_ALTIVEC			ASM_CONST(0x0000000000000008)
+#define CPU_FTR_TAU			ASM_CONST(0x0000000000000010)
+#define CPU_FTR_CAN_DOZE		ASM_CONST(0x0000000000000020)
+#define CPU_FTR_USE_TB			ASM_CONST(0x0000000000000040)
+#define CPU_FTR_604_PERF_MON		ASM_CONST(0x0000000000000080)
+#define CPU_FTR_601			ASM_CONST(0x0000000000000100)
+#define CPU_FTR_HPTE_TABLE		ASM_CONST(0x0000000000000200)
+#define CPU_FTR_CAN_NAP			ASM_CONST(0x0000000000000400)
+#define CPU_FTR_L3CR			ASM_CONST(0x0000000000000800)
+#define CPU_FTR_L3_DISABLE_NAP		ASM_CONST(0x0000000000001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR	ASM_CONST(0x0000000000002000)
+#define CPU_FTR_DUAL_PLL_750FX		ASM_CONST(0x0000000000004000)
+#define CPU_FTR_NO_DPM			ASM_CONST(0x0000000000008000)
+#define CPU_FTR_HAS_HIGH_BATS		ASM_CONST(0x0000000000010000)
+#define CPU_FTR_NEED_COHERENT		ASM_CONST(0x0000000000020000)
+#define CPU_FTR_NO_BTIC			ASM_CONST(0x0000000000040000)
+#define CPU_FTR_BIG_PHYS		ASM_CONST(0x0000000000080000)
+
+#ifdef __powerpc64__
+/* Add the 64-bit processor-unique features in the top half of the word */
+#define CPU_FTR_SLB           		ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE      		ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL         		ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE     		ASM_CONST(0x0000000800000000)
+#define CPU_FTR_NODSISRALIGN  		ASM_CONST(0x0000001000000000)
+#define CPU_FTR_IABR  			ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA  			ASM_CONST(0x0000004000000000)
+#define CPU_FTR_CTRL			ASM_CONST(0x0000008000000000)
+#define CPU_FTR_SMT  			ASM_CONST(0x0000010000000000)
+#define CPU_FTR_COHERENT_ICACHE  	ASM_CONST(0x0000020000000000)
+#define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0000040000000000)
+#define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
+#else
+/* Ensure the flags are available for compiling on 32-bit processors,
+ * but make them no-ops there */
+#define CPU_FTR_SLB           		ASM_CONST(0x0)
+#define CPU_FTR_16M_PAGE      		ASM_CONST(0x0)
+#define CPU_FTR_TLBIEL         		ASM_CONST(0x0)
+#define CPU_FTR_NOEXECUTE     		ASM_CONST(0x0)
+#define CPU_FTR_NODSISRALIGN  		ASM_CONST(0x0)
+#define CPU_FTR_IABR  			ASM_CONST(0x0)
+#define CPU_FTR_MMCRA  			ASM_CONST(0x0)
+#define CPU_FTR_CTRL			ASM_CONST(0x0)
+#define CPU_FTR_SMT  			ASM_CONST(0x0)
+#define CPU_FTR_COHERENT_ICACHE  	ASM_CONST(0x0)
+#define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0)
+#define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define COMMON_USER_PPC64	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
+				 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
+
+#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
+					CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+					CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
+
+/* iSeries doesn't support large pages */
+#ifdef CONFIG_PPC_ISERIES
+#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE)
+#else
+#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
+#endif /* CONFIG_PPC_ISERIES */
+
+/* We only set the AltiVec features if the kernel was compiled with
+ * AltiVec support.
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP	0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP    0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we
+ * have a 74[45]x and an MPC107 host bridge.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
+#define CPU_FTR_COMMON                  CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON                  0
+#endif
+
+/* The powersave features NAP & DOZE seem to confuse the BDI when
+   debugging, so if a BDI is used, disable these.
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE	CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP	CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE	0
+#define CPU_FTR_MAYBE_CAN_NAP	0
+#endif
+
+#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
+		     !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
+		     !defined(CONFIG_BOOKE))
+
+enum {
+	CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
+	CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+	CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
+	CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+	    CPU_FTR_NO_DPM,
+	CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+	CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+	CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+	    CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+	    CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
+	CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+	CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB |
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+	    CPU_FTR_NEED_COHERENT,
+	CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
+	CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+	CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+	CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+	CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+	CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+	CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
+	    CPU_FTR_MAYBE_CAN_NAP,
+	CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+	CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+	CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+	CPU_FTRS_E200 = CPU_FTR_USE_TB,
+	CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+	CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_BIG_PHYS,
+	CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
+#ifdef __powerpc64__
+	CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
+	CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+	    CPU_FTR_MMCRA | CPU_FTR_CTRL,
+	CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
+	CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+	CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+	    CPU_FTR_MMCRA | CPU_FTR_SMT |
+	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+	    CPU_FTR_MMCRA_SIHV,
+	CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
+	CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
+#endif
+
+	CPU_FTRS_POSSIBLE =
+#if CLASSIC_PPC
+	    CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+	    CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+	    CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+	    CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+	    CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+	    CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+	    CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
+	    CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
+#else
+	    CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+	    CPU_FTRS_POWER3_32 |
+#endif
+#ifdef CONFIG_POWER4
+	    CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
+#endif
+#ifdef CONFIG_8xx
+	    CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+	    CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_44x
+	    CPU_FTRS_44X |
+#endif
+#ifdef CONFIG_E200
+	    CPU_FTRS_E200 |
+#endif
+#ifdef CONFIG_E500
+	    CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef __powerpc64__
+	    CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
+	    CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
+#endif
+	    0,
+
+	CPU_FTRS_ALWAYS =
+#if CLASSIC_PPC
+	    CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+	    CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+	    CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+	    CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+	    CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+	    CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+	    CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
+	    CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
+#else
+	    CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+	    CPU_FTRS_POWER3_32 &
+#endif
+#ifdef CONFIG_POWER4
+	    CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
+#endif
+#ifdef CONFIG_8xx
+	    CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+	    CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_44x
+	    CPU_FTRS_44X &
+#endif
+#ifdef CONFIG_E200
+	    CPU_FTRS_E200 &
+#endif
+#ifdef CONFIG_E500
+	    CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef __powerpc64__
+	    CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
+	    CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
+#endif
+	    CPU_FTRS_POSSIBLE,
+};
+
+static inline int cpu_has_feature(unsigned long feature)
+{
+	return (CPU_FTRS_ALWAYS & feature) ||
+	       (CPU_FTRS_POSSIBLE
+		& cur_cpu_spec->cpu_features
+		& feature);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+#define BEGIN_FTR_SECTION		98:
+
+#ifndef __powerpc64__
+#define END_FTR_SECTION(msk, val)		\
+99:						\
+	.section __ftr_fixup,"a";		\
+	.align 2;				\
+	.long msk;				\
+	.long val;				\
+	.long 98b;				\
+	.long 99b;				\
+	.previous
+#else /* __powerpc64__ */
+#define END_FTR_SECTION(msk, val)		\
+99:						\
+	.section __ftr_fixup,"a";		\
+	.align 3;				\
+	.llong msk;				\
+	.llong val;				\
+	.llong 98b;				\
+	.llong 99b;	 			\
+	.previous
+#endif /* __powerpc64__ */
+
+#define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_CPUTABLE_H */
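
For reference (a sketch, not part of this patch): the CPU_FTRS_POSSIBLE/CPU_FTRS_ALWAYS masks let cpu_has_feature() fold to a compile-time constant whenever every CPU the config can build for agrees on a feature. The intended pattern, with an illustrative function name:

#include <asm/cputable.h>

static void example_sync_icache(void)
{
	/* Constant-folds away on configs where every possible CPU
	 * has a snooping icache (feature lands in CPU_FTRS_ALWAYS). */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* ... an explicit icbi-based flush would go here ... */
}
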
diff --git a/include/asm-ppc/dma.h b/include/asm-powerpc/dma.h
similarity index 86%
rename from include/asm-ppc/dma.h
rename to include/asm-powerpc/dma.h
index cc8e5cd..926378d 100644
--- a/include/asm-ppc/dma.h
+++ b/include/asm-powerpc/dma.h
@@ -1,18 +1,14 @@
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+
 /*
- * include/asm-ppc/dma.h: Defines for using and allocating dma channels.
+ * Defines for using and allocating dma channels.
  * Written by Hennus Bergman, 1992.
  * High DMA channel support & info by Hannu Savolainen
  * and John Boyd, Nov. 1992.
  * Changes for ppc sound by Christoph Nadig
  */
 
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <asm/io.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
 /*
  * Note: Adapted for PowerPC by Gary Thomas
  * Modified by Cort Dougan <cort@cs.nmt.edu>
@@ -25,8 +21,10 @@
  * with a grain of salt.
  */
 
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
+#include <linux/config.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
 
 #ifndef MAX_DMA_CHANNELS
 #define MAX_DMA_CHANNELS	8
@@ -34,11 +32,9 @@
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
 /* Doesn't really apply... */
-#define MAX_DMA_ADDRESS		0xFFFFFFFF
+#define MAX_DMA_ADDRESS		(~0UL)
 
-/* in arch/ppc/kernel/setup.c -- Cort */
-extern unsigned long DMA_MODE_WRITE, DMA_MODE_READ;
-extern unsigned long ISA_DMA_THRESHOLD;
+#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
 
 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
 #define dma_outb	outb_p
@@ -171,7 +167,18 @@
 #define DMA1_EXT_REG		0x40B
 #define DMA2_EXT_REG		0x4D6
 
+#ifndef __powerpc64__
+    /* in arch/ppc/kernel/setup.c -- Cort */
+    extern unsigned int DMA_MODE_WRITE;
+    extern unsigned int DMA_MODE_READ;
+    extern unsigned long ISA_DMA_THRESHOLD;
+#else
+    #define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
+    #define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
+#endif
+
 #define DMA_MODE_CASCADE	0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */
+
 #define DMA_AUTOINIT		0x10
 
 extern spinlock_t dma_spin_lock;
@@ -200,8 +207,9 @@
 	if (dmanr <= 3) {
 		dma_outb(dmanr, DMA1_MASK_REG);
 		dma_outb(ucDmaCmd, DMA1_CMD_REG);	/* Enable group */
-	} else
+	} else {
 		dma_outb(dmanr & 3, DMA2_MASK_REG);
+	}
 }
 
 static __inline__ void disable_dma(unsigned int dmanr)
@@ -290,19 +298,26 @@
 static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
 {
 	if (dmanr <= 3) {
-		dma_outb(phys & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
-		dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
+		dma_outb(phys & 0xff,
+			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
+		dma_outb((phys >> 8) & 0xff,
+			 ((dmanr & 3) << 1) + IO_DMA1_BASE);
 	} else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
-		dma_outb(phys & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
-		dma_outb((phys >> 8) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb(phys & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((phys >> 8) & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
 		dma_outb((dmanr & 3), DMA2_EXT_REG);
 	} else {
-		dma_outb((phys >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
-		dma_outb((phys >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((phys >> 1) & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
+		dma_outb((phys >> 9) & 0xff,
+			 ((dmanr & 3) << 2) + IO_DMA2_BASE);
 	}
 	set_dma_page(dmanr, phys >> 16);
 }
 
+
 /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
  * a specific DMA channel.
  * You must ensure the parameters are valid.
@@ -315,21 +330,24 @@
 {
 	count--;
 	if (dmanr <= 3) {
-		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
-		dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 +
-			 IO_DMA1_BASE);
+		dma_outb(count & 0xff,
+			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+		dma_outb((count >> 8) & 0xff,
+			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
 	} else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
-		dma_outb(count & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
-		dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 2) + 2 +
-			 IO_DMA2_BASE);
+		dma_outb(count & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+		dma_outb((count >> 8) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
 	} else {
-		dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 +
-			 IO_DMA2_BASE);
-		dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 +
-			 IO_DMA2_BASE);
+		dma_outb((count >> 1) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+		dma_outb((count >> 9) & 0xff,
+			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
 	}
 }
 
+
 /* Get DMA residue count. After a DMA transfer, this
  * should return zero. Reading this while a DMA transfer is
  * still in progress will return unpredictable results.
@@ -340,8 +358,8 @@
  */
 static __inline__ int get_dma_residue(unsigned int dmanr)
 {
-	unsigned int io_port = (dmanr <= 3) ?
-	    ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+	unsigned int io_port = (dmanr <= 3)
+	    ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
 	    : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
 
 	/* using short to get 16-bit wrap around */
@@ -352,7 +370,6 @@
 
 	return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
 	    ? count : (count << 1);
-
 }
 
 /* These are in kernel/dma.c: */
@@ -367,5 +384,7 @@
 #else
 #define isa_dma_bridge_buggy	(0)
 #endif
-#endif				/* _ASM_DMA_H */
-#endif				/* __KERNEL__ */
+
+#endif	/* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
+
+#endif	/* _ASM_POWERPC_DMA_H */
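
For reference (a sketch, not part of this patch): the 8237-style helpers above are used together with the generic request_dma()/free_dma() from kernel/dma.c and the dma_spin_lock wrappers. Programming a channel for a device-to-memory transfer; channel, address, and length are illustrative:

#include <linux/errno.h>
#include <asm/dma.h>

static int example_start_dma(unsigned int chan, unsigned int phys, int len)
{
	unsigned long flags;

	if (request_dma(chan, "example") != 0)
		return -EBUSY;

	flags = claim_dma_lock();
	disable_dma(chan);
	clear_dma_ff(chan);		/* reset the flip-flop first */
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, phys);
	set_dma_count(chan, len);
	enable_dma(chan);
	release_dma_lock(flags);

	return 0;
}
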
diff --git a/include/asm-ppc64/elf.h b/include/asm-powerpc/elf.h
similarity index 87%
rename from include/asm-ppc64/elf.h
rename to include/asm-powerpc/elf.h
index c919a89..f0a6779 100644
--- a/include/asm-ppc64/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -1,10 +1,11 @@
-#ifndef __PPC64_ELF_H
-#define __PPC64_ELF_H
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
 
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
 #include <asm/auxvec.h>
+#include <asm/page.h>
 
 /* PowerPC relocations defined by the ABIs */
 #define R_PPC_NONE		0
@@ -75,7 +76,7 @@
 #define R_PPC_GOT_DTPREL16_HI	93 /* half16*	(sym+add)@got@dtprel@h */
 #define R_PPC_GOT_DTPREL16_HA	94 /* half16*	(sym+add)@got@dtprel@ha */
 
-/* Keep this the last entry.  */
+/* keep this the last entry. */
 #define R_PPC_NUM		95
 
 /*
@@ -90,8 +91,6 @@
 
 #define ELF_NGREG	48	/* includes nip, msr, lr, etc. */
 #define ELF_NFPREG	33	/* includes fpscr */
-#define ELF_NVRREG32	33	/* includes vscr & vrsave stuffed together */
-#define ELF_NVRREG	34	/* includes vscr & vrsave in split vectors */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
@@ -100,8 +99,21 @@
 typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
 
 /*
- * These are used to set parameters in the core dumps.
+ * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
  */
+#ifdef __powerpc64__
+# define ELF_NVRREG32	33	/* includes vscr & vrsave stuffed together */
+# define ELF_NVRREG	34	/* includes vscr & vrsave in split vectors */
+# define ELF_GREG_TYPE	elf_greg_t64
+#else
+# define ELF_NEVRREG	34	/* includes acc (as 2) */
+# define ELF_NVRREG	33	/* includes vscr */
+# define ELF_GREG_TYPE	elf_greg_t32
+# define ELF_ARCH	EM_PPC
+# define ELF_CLASS	ELFCLASS32
+# define ELF_DATA	ELFDATA2MSB
+#endif /* __powerpc64__ */
+
 #ifndef ELF_ARCH
 # define ELF_ARCH	EM_PPC64
 # define ELF_CLASS	ELFCLASS64
@@ -114,8 +126,9 @@
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
 # define elf_addr_t u32
-#endif
+#endif /* ELF_ARCH */
 
+/* Floating point registers */
 typedef double elf_fpreg_t;
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
@@ -125,7 +138,9 @@
  * The entry with index 32 contains the vscr as the last word (offset 12) 
  * within the quadword.  This allows the vscr to be stored as either a 
  * quadword (since it must be copied via a vector register to/from storage) 
- * or as a word.  The entry with index 33 contains the vrsave as the first 
+ * or as a word.
+ *
+ * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
  * word (offset 0) within the quadword.
  *
  * This definition of the VMX state is compatible with the current PPC32 
@@ -138,7 +153,9 @@
  */
 typedef __vector128 elf_vrreg_t;
 typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
+#ifdef __powerpc64__
 typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+#endif
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -146,7 +163,7 @@
 #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
 
 #define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
@@ -158,26 +175,30 @@
 #ifdef __KERNEL__
 
 /* Common routine for both 32-bit and 64-bit processes */
-static inline void ppc64_elf_core_copy_regs(elf_gregset_t elf_regs,
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 					    struct pt_regs *regs)
 {
 	int i;
-	int gprs = sizeof(struct pt_regs)/sizeof(elf_greg_t64);
+	int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
 
 	if (gprs > ELF_NGREG)
 		gprs = ELF_NGREG;
 
 	for (i=0; i < gprs; i++)
-		elf_regs[i] = (elf_greg_t)((elf_greg_t64 *)regs)[i];
+		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+
+	memset((char *)(elf_regs) + sizeof(struct pt_regs), 0,
+	       sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+
 }
-#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs);
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
 
 static inline int dump_task_regs(struct task_struct *tsk,
 				 elf_gregset_t *elf_regs)
 {
 	struct pt_regs *regs = tsk->thread.regs;
 	if (regs)
-		ppc64_elf_core_copy_regs(*elf_regs, regs);
+		ppc_elf_core_copy_regs(*elf_regs, regs);
 
 	return 1;
 }
@@ -186,15 +207,19 @@
 extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); 
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 
-/* XXX Should we define the XFPREGS using altivec ??? */
+#endif /* __KERNEL__ */
 
-#endif
-
-/* This yields a mask that user programs can use to figure out what
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
-
-#define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP	(cur_cpu_spec->cpu_user_features)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr)	do { \
+	memset(_r->gpr, 0, sizeof(_r->gpr)); \
+	_r->ctr = _r->link = _r->xer = _r->ccr = 0; \
+	_r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
@@ -205,14 +230,10 @@
 
 #define ELF_PLATFORM	(NULL)
 
-#define ELF_PLAT_INIT(_r, load_addr)	do { \
-	memset(_r->gpr, 0, sizeof(_r->gpr)); \
-	_r->ctr = _r->link = _r->xer = _r->ccr = 0; \
-	_r->gpr[2] = load_addr; \
-} while (0)
-
 #ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2)				\
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex, ibcs2)				\
 do {								\
 	unsigned long new_flags = 0;				\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
@@ -225,7 +246,6 @@
 	if (personality(current->personality) != PER_LINUX32)	\
 		set_personality(PER_LINUX);			\
 } while (0)
-
 /*
  * An executable for which elf_read_implies_exec() returns TRUE will
  * have the READ_IMPLIES_EXEC personality flag set automatically. This
  * is only required to work around bugs in old 32-bit toolchains. Since
  * the 64-bit ABI has never had these issues, don't enable the workaround
  * the 64bit ABI has never had these issues dont enable the workaround
  * even if we have an executable stack.
  */
-#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
 		(exec_stk != EXSTACK_DISABLE_X) : 0)
+#else
+# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#endif /* __powerpc64__ */
 
-#endif
+#endif /* __KERNEL__ */
 
 extern int dcache_bsize;
 extern int icache_bsize;
 extern int ucache_bsize;
 
-/* We do have an arch_setup_additional_pages for vDSO matters */
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+#ifdef __powerpc64__
 struct linux_binprm;
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES	/* vDSO has arch_setup_additional_pages */
 extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#else
+#define VDSO_AUX_ENT(a,b)
+#endif /* __powerpc64__ */
 
 /*
  * The requirements here are:
@@ -265,9 +292,8 @@
 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
-	/* vDSO base */							\
-	NEW_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base);       	\
- } while (0)
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base)	\
+} while (0)
 
 /* PowerPC64 relocations defined by the ABIs */
 #define R_PPC64_NONE    R_PPC_NONE
@@ -384,4 +410,4 @@
 /* Keep this the last entry.  */
 #define R_PPC64_NUM		107
 
-#endif /* __PPC64_ELF_H */
+#endif /* _ASM_POWERPC_ELF_H */
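
For reference (a sketch, not part of this patch): the ELF_HWCAP word (cpu_user_features) reaches userland through the auxiliary vector, next to the cache-size entries that ARCH_DLINFO emits. A userspace consumer, assuming a glibc new enough to provide getauxval():

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & 0x10000000)		/* PPC_FEATURE_HAS_ALTIVEC */
		printf("AltiVec available\n");
	return 0;
}
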
diff --git a/include/asm-ppc/hardirq.h b/include/asm-powerpc/hardirq.h
similarity index 67%
rename from include/asm-ppc/hardirq.h
rename to include/asm-powerpc/hardirq.h
index 94f1411..2c0a31b 100644
--- a/include/asm-ppc/hardirq.h
+++ b/include/asm-powerpc/hardirq.h
@@ -1,11 +1,5 @@
-#ifdef __KERNEL__
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-#include <linux/config.h>
-#include <linux/cache.h>
-#include <linux/smp_lock.h>
-#include <asm/irq.h>
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
 
 /* The __last_jiffy_stamp field is needed to ensure that no decrementer
  * interrupt is lost on SMP machines. Since on most CPUs it is in the same
@@ -13,7 +7,7 @@
  * for uniformity.
  */
 typedef struct {
-	unsigned long __softirq_pending;	/* set_bit is used on this */
+	unsigned int __softirq_pending;	/* set_bit is used on this */
 	unsigned int __last_jiffy_stamp;
 } ____cacheline_aligned irq_cpustat_t;
 
@@ -27,5 +21,4 @@
 	BUG();
 }
 
-#endif /* __ASM_HARDIRQ_H */
-#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/include/asm-ppc64/hw_irq.h b/include/asm-powerpc/hw_irq.h
similarity index 65%
rename from include/asm-ppc64/hw_irq.h
rename to include/asm-powerpc/hw_irq.h
index baea40e..605a65e 100644
--- a/include/asm-ppc64/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -1,22 +1,18 @@
 /*
  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- *
- * Use inline IRQs where possible - Anton Blanchard <anton@au.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
 #ifdef __KERNEL__
-#ifndef _PPC64_HW_IRQ_H
-#define _PPC64_HW_IRQ_H
 
 #include <linux/config.h>
 #include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
 #include <asm/irq.h>
 
-int timer_interrupt(struct pt_regs *);
+extern void timer_interrupt(struct pt_regs *);
 extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
 #ifdef CONFIG_PPC_ISERIES
@@ -33,45 +29,60 @@
 
 #else
 
-#define local_save_flags(flags)	((flags) = mfmsr())
+#if defined(CONFIG_BOOKE)
+#define SET_MSR_EE(x)	mtmsr(x)
+#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#elif defined(__powerpc64__)
+#define SET_MSR_EE(x)	__mtmsrd(x, 1)
 #define local_irq_restore(flags) do { \
 	__asm__ __volatile__("": : :"memory"); \
 	__mtmsrd((flags), 1); \
 } while(0)
+#else
+#define SET_MSR_EE(x)	mtmsr(x)
+#define local_irq_restore(flags)	mtmsr(flags)
+#endif
 
 static inline void local_irq_disable(void)
 {
+#ifdef CONFIG_BOOKE
+	__asm__ __volatile__("wrteei 0": : :"memory");
+#else
 	unsigned long msr;
-	msr = mfmsr();
-	__mtmsrd(msr & ~MSR_EE, 1);
 	__asm__ __volatile__("": : :"memory");
+	msr = mfmsr();
+	SET_MSR_EE(msr & ~MSR_EE);
+#endif
 }
 
 static inline void local_irq_enable(void)
 {
+#ifdef CONFIG_BOOKE
+	__asm__ __volatile__("wrteei 1": : :"memory");
+#else
 	unsigned long msr;
 	__asm__ __volatile__("": : :"memory");
 	msr = mfmsr();
-	__mtmsrd(msr | MSR_EE, 1);
+	SET_MSR_EE(msr | MSR_EE);
+#endif
 }
 
-static inline void __do_save_and_cli(unsigned long *flags)
+static inline void local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();
 	*flags = msr;
-	__mtmsrd(msr & ~MSR_EE, 1);
+#ifdef CONFIG_BOOKE
+	__asm__ __volatile__("wrteei 0": : :"memory");
+#else
+	SET_MSR_EE(msr & ~MSR_EE);
+#endif
 	__asm__ __volatile__("": : :"memory");
 }
 
-#define local_irq_save(flags)          __do_save_and_cli(&flags)
-
-#define irqs_disabled()				\
-({						\
-	unsigned long flags;			\
-	local_save_flags(flags);		\
-	!(flags & MSR_EE);			\
-})
+#define local_save_flags(flags)	((flags) = mfmsr())
+#define local_irq_save(flags)	local_irq_save_ptr(&flags)
+#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
 
 #endif /* CONFIG_PPC_ISERIES */
 
@@ -99,6 +110,6 @@
  */
 struct hw_interrupt_type;
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
- 
-#endif /* _PPC64_HW_IRQ_H */
-#endif /* __KERNEL__ */
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_HW_IRQ_H */
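
For reference (a sketch, not part of this patch): the rewritten local_irq_save() still takes the flags lvalue directly, now going through local_irq_save_ptr(). The usual critical-section pattern is unchanged; the counter below is illustrative:

#include <asm/hw_irq.h>	/* normally pulled in indirectly */

static unsigned int example_events;

static void example_count_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* wrteei/mtmsr(d) per CPU family */
	example_events++;
	local_irq_restore(flags);
}
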
diff --git a/include/asm-ppc64/kdebug.h b/include/asm-powerpc/kdebug.h
similarity index 78%
rename from include/asm-ppc64/kdebug.h
rename to include/asm-powerpc/kdebug.h
index d383d16..9dcbac6 100644
--- a/include/asm-ppc64/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_KDEBUG_H
-#define _PPC64_KDEBUG_H 1
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
 
 /* nearly identical to x86_64/i386 code */
 
@@ -21,7 +21,7 @@
    then free.
  */
 int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *ppc64_die_chain;
+extern struct notifier_block *powerpc_die_chain;
 
 /* Grossly misnamed. */
 enum die_val {
@@ -30,14 +30,13 @@
 	DIE_DABR_MATCH,
 	DIE_BPT,
 	DIE_SSTEP,
-	DIE_GPF,
 	DIE_PAGE_FAULT,
 };
 
 static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
 {
 	struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
-	return notifier_call_chain(&ppc64_die_chain, val, &args);
+	return notifier_call_chain(&powerpc_die_chain, val, &args);
 }
 
-#endif
+#endif /* _ASM_POWERPC_KDEBUG_H */
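
For reference (a sketch, not part of this patch): consumers such as kprobes hook the renamed powerpc_die_chain through register_die_notifier(). A minimal notifier; the handler body is illustrative:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int example_die_handler(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct die_args *args = data;

	if (val == DIE_BPT)
		printk(KERN_DEBUG "breakpoint at %lx\n", args->regs->nip);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_handler,
};

/* registered once at init time: register_die_notifier(&example_die_nb); */
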
diff --git a/include/asm-powerpc/kmap_types.h b/include/asm-powerpc/kmap_types.h
new file mode 100644
index 0000000..b6bac6f
--- /dev/null
+++ b/include/asm-powerpc/kmap_types.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_KMAP_TYPES_H
+#define _ASM_POWERPC_KMAP_TYPES_H
+
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+enum km_type {
+	KM_BOUNCE_READ,
+	KM_SKB_SUNRPC_DATA,
+	KM_SKB_DATA_SOFTIRQ,
+	KM_USER0,
+	KM_USER1,
+	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
+	KM_IRQ0,
+	KM_IRQ1,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
+	KM_PPC_SYNC_PAGE,
+	KM_PPC_SYNC_ICACHE,
+	KM_TYPE_NR
+};
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/include/asm-ppc64/kprobes.h b/include/asm-powerpc/kprobes.h
similarity index 94%
rename from include/asm-ppc64/kprobes.h
rename to include/asm-powerpc/kprobes.h
index d9129d2..b2f09f1 100644
--- a/include/asm-ppc64/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -1,8 +1,7 @@
-#ifndef _ASM_KPROBES_H
-#define _ASM_KPROBES_H
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
- *  include/asm-ppc64/kprobes.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -64,4 +63,4 @@
 	return 0;
 }
 #endif
-#endif				/* _ASM_KPROBES_H */
+#endif	/* _ASM_POWERPC_KPROBES_H */
diff --git a/arch/ppc64/kernel/mpic.h b/include/asm-powerpc/mpic.h
similarity index 96%
rename from arch/ppc64/kernel/mpic.h
rename to include/asm-powerpc/mpic.h
index ca78a7f..6b558ae 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+
 #include <linux/irq.h>
 
 /*
@@ -258,6 +261,12 @@
 /* Clean up for kexec (or cpu offline or ...) */
 extern void mpic_teardown_this_cpu(int secondary);
 
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
 /* Request IPIs on primary mpic */
 extern void mpic_request_ipis(void);
 
@@ -271,3 +280,5 @@
 
 /* global mpic for pSeries */
 extern struct mpic *pSeries_mpic;
+
+#endif	/* _ASM_POWERPC_MPIC_H */
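
For reference (a sketch, not part of this patch): the two new accessors allow masking lower-priority sources around a critical region without disabling interrupts entirely, assuming the usual 0..15 openpic task-priority range:

#include <asm/mpic.h>

static void example_with_sources_masked(void (*fn)(void))
{
	int prio = mpic_cpu_get_priority();

	mpic_cpu_set_priority(15);	/* mask everything below 15 */
	fn();
	mpic_cpu_set_priority(prio);	/* restore previous priority */
}
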
diff --git a/include/asm-ppc64/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
similarity index 83%
rename from include/asm-ppc64/oprofile_impl.h
rename to include/asm-powerpc/oprofile_impl.h
index b04f1df..8013cd2 100644
--- a/include/asm-ppc64/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -9,39 +9,49 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
+#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
+#define _ASM_POWERPC_OPROFILE_IMPL_H
 
 #define OP_MAX_COUNTER 8
 
 /* Per-counter configuration as set via oprofilefs.  */
 struct op_counter_config {
+#ifdef __powerpc64__
 	unsigned long valid;
+#endif
 	unsigned long enabled;
 	unsigned long event;
 	unsigned long count;
 	unsigned long kernel;
+#ifdef __powerpc64__
 	/* We don't support per-counter user/kernel selection */
+#endif
 	unsigned long user;
 	unsigned long unit_mask;
 };
 
 /* System-wide configuration as set via oprofilefs.  */
 struct op_system_config {
+#ifdef __powerpc64__
 	unsigned long mmcr0;
 	unsigned long mmcr1;
 	unsigned long mmcra;
+#endif
 	unsigned long enable_kernel;
 	unsigned long enable_user;
+#ifdef __powerpc64__
 	unsigned long backtrace_spinlocks;
+#endif
 };
 
 /* Per-arch configuration */
-struct op_ppc64_model {
+struct op_powerpc_model {
 	void (*reg_setup) (struct op_counter_config *,
 			   struct op_system_config *,
 			   int num_counters);
+#ifdef __powerpc64__
 	void (*cpu_setup) (void *);
+#endif
 	void (*start) (struct op_counter_config *);
 	void (*stop) (void);
 	void (*handle_interrupt) (struct pt_regs *,
@@ -49,8 +59,9 @@
 	int num_counters;
 };
 
-extern struct op_ppc64_model op_model_rs64;
-extern struct op_ppc64_model op_model_power4;
+#ifdef __powerpc64__
+extern struct op_powerpc_model op_model_rs64;
+extern struct op_powerpc_model op_model_power4;
 
 static inline unsigned int ctr_read(unsigned int i)
 {
@@ -107,5 +118,6 @@
 		break;
 	}
 }
+#endif /* __powerpc64__ */
 
-#endif
+#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
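
For reference (a sketch, not part of this patch): an architecture model plugs into oprofile by filling in the renamed struct op_powerpc_model. All example_* handlers below are hypothetical placeholders:

#include <asm/oprofile_impl.h>

static void example_reg_setup(struct op_counter_config *ctr,
			      struct op_system_config *sys, int num_ctrs);
static void example_start(struct op_counter_config *ctr);
static void example_stop(void);
static void example_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr);

static struct op_powerpc_model op_model_example = {
	.reg_setup	  = example_reg_setup,
	.start		  = example_start,
	.stop		  = example_stop,
	.handle_interrupt = example_handle_interrupt,
	.num_counters	  = 8,
};
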
diff --git a/include/asm-ppc64/posix_types.h b/include/asm-powerpc/posix_types.h
similarity index 87%
rename from include/asm-ppc64/posix_types.h
rename to include/asm-powerpc/posix_types.h
index 516de72..c639107 100644
--- a/include/asm-ppc64/posix_types.h
+++ b/include/asm-powerpc/posix_types.h
@@ -1,44 +1,54 @@
-#ifndef _PPC64_POSIX_TYPES_H
-#define _PPC64_POSIX_TYPES_H
+#ifndef _ASM_POWERPC_POSIX_TYPES_H
+#define _ASM_POWERPC_POSIX_TYPES_H
 
 /*
  * This file is generally used by user-level software, so you need to
  * be a little careful about namespace pollution etc.  Also, we cannot
  * assume GCC is being used.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 typedef unsigned long	__kernel_ino_t;
-typedef unsigned long  	__kernel_nlink_t;
 typedef unsigned int	__kernel_mode_t;
 typedef long		__kernel_off_t;
-typedef long long	__kernel_loff_t;
 typedef int		__kernel_pid_t;
-typedef int             __kernel_ipc_pid_t;
 typedef unsigned int	__kernel_uid_t;
 typedef unsigned int	__kernel_gid_t;
-typedef unsigned long	__kernel_size_t;
-typedef long		__kernel_ssize_t;
 typedef long		__kernel_ptrdiff_t;
 typedef long		__kernel_time_t;
+typedef long		__kernel_clock_t;
 typedef int		__kernel_timer_t;
 typedef int		__kernel_clockid_t;
 typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
 typedef int		__kernel_daddr_t;
 typedef char *		__kernel_caddr_t;
 typedef unsigned short	__kernel_uid16_t;
 typedef unsigned short	__kernel_gid16_t;
 typedef unsigned int	__kernel_uid32_t;
 typedef unsigned int	__kernel_gid32_t;
-
 typedef unsigned int	__kernel_old_uid_t;
 typedef unsigned int	__kernel_old_gid_t;
+
+#ifdef __powerpc64__
+typedef unsigned long  	__kernel_nlink_t;
+typedef int             __kernel_ipc_pid_t;
+typedef unsigned long	__kernel_size_t;
+typedef long		__kernel_ssize_t;
 typedef unsigned long	__kernel_old_dev_t;
+#else
+typedef unsigned short	__kernel_nlink_t;
+typedef short		__kernel_ipc_pid_t;
+typedef unsigned int	__kernel_size_t;
+typedef int		__kernel_ssize_t;
+typedef unsigned int	__kernel_old_dev_t;
+#endif
+
+#ifdef __powerpc64__
+typedef long long	__kernel_loff_t;
+#else
+#ifdef __GNUC__
+typedef long long	__kernel_loff_t;
+#endif
+#endif
 
 typedef struct {
 	int	val[2];
@@ -116,4 +126,4 @@
 
 #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
 #endif /* __GNUC__ */
-#endif /* _PPC64_POSIX_TYPES_H */
+#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/ppc64/kernel/pci.h b/include/asm-powerpc/ppc-pci.h
similarity index 93%
rename from arch/ppc64/kernel/pci.h
rename to include/asm-powerpc/ppc-pci.h
index 5eb2cc3..a88728f 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -6,8 +6,8 @@
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
  */
-#ifndef __PPC_KERNEL_PCI_H__
-#define __PPC_KERNEL_PCI_H__
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
 
 #include <linux/pci.h>
 #include <asm/pci-bridge.h>
@@ -51,4 +51,4 @@
 extern unsigned long pci_assign_all_buses;
 extern int pci_read_irq_line(struct pci_dev *pci_dev);
 
-#endif /* __PPC_KERNEL_PCI_H__ */
+#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/include/asm-ppc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
similarity index 61%
rename from include/asm-ppc/ppc_asm.h
rename to include/asm-powerpc/ppc_asm.h
index bb53e2d..4efa718 100644
--- a/include/asm-ppc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -1,37 +1,39 @@
 /*
- * include/asm-ppc/ppc_asm.h
- *
- * Definitions used by various bits of low-level assembly code on PowerPC.
- *
  * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
+
+#ifdef __ASSEMBLY__
 
 /*
  * Macros for storing registers into and loading registers from
  * exception frames.
  */
+#ifdef __powerpc64__
+#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
+#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
+#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
+#else
 #define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
-#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
 #define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
-#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-
 #define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
 				SAVE_10GPRS(22, base)
 #define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); \
 				REST_10GPRS(22, base)
+#endif
+
+
+#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
 #define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*(n)(base)
 #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
@@ -47,32 +49,83 @@
 #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
 #define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n));  stvx n,b,base
-#define SAVE_2VR(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
-#define SAVE_4VR(n,b,base)	SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base)
-#define SAVE_8VR(n,b,base)	SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base)
-#define SAVE_16VR(n,b,base)	SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base)
-#define SAVE_32VR(n,b,base)	SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base)
+#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
 #define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,b,base
-#define REST_2VR(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
-#define REST_4VR(n,b,base)	REST_2VR(n,b,base); REST_2VR(n+2,b,base)
-#define REST_8VR(n,b,base)	REST_4VR(n,b,base); REST_4VR(n+4,b,base)
-#define REST_16VR(n,b,base)	REST_8VR(n,b,base); REST_8VR(n+8,b,base)
-#define REST_32VR(n,b,base)	REST_16VR(n,b,base); REST_16VR(n+16,b,base)
+#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
 #define SAVE_EVR(n,s,base)	evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVR(n,s,base)	SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVR(n,s,base)	SAVE_2EVR(n,s,base); SAVE_2EVR(n+2,s,base)
-#define SAVE_8EVR(n,s,base)	SAVE_4EVR(n,s,base); SAVE_4EVR(n+4,s,base)
-#define SAVE_16EVR(n,s,base)	SAVE_8EVR(n,s,base); SAVE_8EVR(n+8,s,base)
-#define SAVE_32EVR(n,s,base)	SAVE_16EVR(n,s,base); SAVE_16EVR(n+16,s,base)
-
+#define SAVE_2EVRS(n,s,base)	SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
+#define SAVE_4EVRS(n,s,base)	SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
+#define SAVE_8EVRS(n,s,base)	SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
+#define SAVE_16EVRS(n,s,base)	SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
+#define SAVE_32EVRS(n,s,base)	SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
 #define REST_EVR(n,s,base)	lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVR(n,s,base)	REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVR(n,s,base)	REST_2EVR(n,s,base); REST_2EVR(n+2,s,base)
-#define REST_8EVR(n,s,base)	REST_4EVR(n,s,base); REST_4EVR(n+4,s,base)
-#define REST_16EVR(n,s,base)	REST_8EVR(n,s,base); REST_8EVR(n+8,s,base)
-#define REST_32EVR(n,s,base)	REST_16EVR(n,s,base); REST_16EVR(n+16,s,base)
+#define REST_2EVRS(n,s,base)	REST_EVR(n,s,base); REST_EVR(n+1,s,base)
+#define REST_4EVRS(n,s,base)	REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
+#define REST_8EVRS(n,s,base)	REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
+#define REST_16EVRS(n,s,base)	REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
+#define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 
+/* Macros to adjust thread priority for iSeries hardware multithreading */
+#define HMT_VERY_LOW    or   31,31,31	# very low priority
+#define HMT_LOW		or 1,1,1
+#define HMT_MEDIUM_LOW  or   6,6,6	# medium low priority
+#define HMT_MEDIUM	or 2,2,2
+#define HMT_MEDIUM_HIGH or   5,5,5	# medium high priority
+#define HMT_HIGH	or 3,3,3
+
+/* handle instructions that older assemblers may not know */
+#define RFCI		.long 0x4c000066	/* rfci instruction */
+#define RFDI		.long 0x4c00004e	/* rfdi instruction */
+#define RFMCI		.long 0x4c00004c	/* rfmci instruction */
+
+/*
+ * LOADADDR( rn, name )
+ *   loads the address of 'name' into 'rn'
+ *
+ * LOADBASE( rn, name )
+ *   loads the address (less the low 16 bits) of 'name' into 'rn'
+ *   suitable for base+disp addressing
+ */
+#ifdef __powerpc64__
+#define LOADADDR(rn,name) \
+	lis	rn,name##@highest;	\
+	ori	rn,rn,name##@higher;	\
+	rldicr	rn,rn,32,31;		\
+	oris	rn,rn,name##@h;		\
+	ori	rn,rn,name##@l
+
+#define LOADBASE(rn,name) \
+	lis	rn,name@highest;	\
+	ori	rn,rn,name@higher;	\
+	rldicr	rn,rn,32,31;		\
+	oris	rn,rn,name@ha
+
+
+#define SET_REG_TO_CONST(reg, value)	         	\
+	lis     reg,(((value)>>48)&0xFFFF);             \
+	ori     reg,reg,(((value)>>32)&0xFFFF);         \
+	rldicr  reg,reg,32,31;                          \
+	oris    reg,reg,(((value)>>16)&0xFFFF);         \
+	ori     reg,reg,((value)&0xFFFF);
+
+#define SET_REG_TO_LABEL(reg, label)	         	\
+	lis     reg,(label)@highest;                    \
+	ori     reg,reg,(label)@higher;                 \
+	rldicr  reg,reg,32,31;                          \
+	oris    reg,reg,(label)@h;                      \
+	ori     reg,reg,(label)@l;
+#endif
+
+/* various errata or part fixups */
 #ifdef CONFIG_PPC601_SYNC_FIX
 #define SYNC				\
 BEGIN_FTR_SECTION			\
@@ -93,6 +146,7 @@
 #define ISYNC_601
 #endif
 
+
 #ifndef CONFIG_SMP
 #define TLBSYNC
 #else /* CONFIG_SMP */
@@ -104,6 +158,7 @@
 END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #endif
 
+
 /*
  * This instruction is not implemented on the PPC 603 or 601; however, on
  * the 403GCX and 405GP tlbia IS defined and tlbie is not.
@@ -121,14 +176,44 @@
 	bdnz	0b
 #endif
 
-#ifdef CONFIG_BOOKE
+
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb)	dcbt	ra, rb;
+#define	PPC405_ERR77_SYNC	sync;
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+
+#if defined(CONFIG_BOOKE)
 #define tophys(rd,rs)				\
 	addis	rd,rs,0
 
 #define tovirt(rd,rs)				\
 	addis	rd,rs,0
 
-#else  /* CONFIG_BOOKE */
+#elif defined(CONFIG_PPC64)
+/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
+ * then we can easily do this with one asm insn. -Peter
+ */
+#define tophys(rd,rs)                           \
+        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
+        rldicr  rd,rd,32,31;                    \
+        sub     rd,rs,rd
+
+#define tovirt(rd,rs)                           \
+        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
+        rldicr  rd,rd,32,31;                    \
+        add     rd,rs,rd
+#else
 /*
  * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
  * physical base address of RAM at compile time.
@@ -146,14 +231,14 @@
 	.align  1;				\
 	.long   0b;				\
 	.previous
-#endif  /* CONFIG_BOOKE */
+#endif
 
 /*
  * On 64-bit cpus, we use the rfid instruction instead of rfi, but
  * we then have to make sure we preserve the top 32 bits except for
  * the 64-bit mode bit, which we clear.
  */
-#ifdef CONFIG_PPC64BRIDGE
+#if defined(CONFIG_PPC64BRIDGE)
 #define	FIX_SRR1(ra, rb)	\
 	mr	rb,ra;		\
 	mfmsr	ra;		\
@@ -162,6 +247,17 @@
 #define	RFI		.long	0x4c000024	/* rfid instruction */
 #define MTMSRD(r)	.long	(0x7c000164 + ((r) << 21))	/* mtmsrd */
 #define CLR_TOP32(r)	rlwinm	(r),(r),0,0,31	/* clear top 32 bits */
+#elif defined(CONFIG_PPC64)
+/* Insert the high 32 bits of the MSR into what will be the new
+   MSR (via SRR1 and rfid).  This preserves the MSR.SF and MSR.ISF
+   bits. */
+
+#define FIX_SRR1(ra, rb)	\
+	mr	rb,ra;		\
+	mfmsr	ra;		\
+	rldimi	ra,rb,0,32
+
+#define CLR_TOP32(r)	rlwinm	(r),(r),0,0,31	/* clear top 32 bits */
 
 #else
 #define FIX_SRR1(ra, rb)
@@ -172,24 +268,6 @@
 #endif
 #define MTMSRD(r)	mtmsr	r
 #define CLR_TOP32(r)
-#endif /* CONFIG_PPC64BRIDGE */
-
-#define RFCI		.long 0x4c000066	/* rfci instruction */
-#define RFDI		.long 0x4c00004e	/* rfdi instruction */
-#define RFMCI		.long 0x4c00004c	/* rfmci instruction */
-
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb)	dcbt	ra, rb;
-#define	PPC405_ERR77_SYNC	sync;
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-
-#ifdef CONFIG_IBM440EP_ERR42
-#define PPC440EP_ERR42 isync
-#else
-#define PPC440EP_ERR42
 #endif
 
 /* The boring bits... */
@@ -277,6 +355,8 @@
 #define	fr30	30
 #define	fr31	31
 
+/* AltiVec Registers (VPRs) */
+
 #define	vr0	0
 #define	vr1	1
 #define	vr2	2
@@ -310,6 +390,8 @@
 #define	vr30	30
 #define	vr31	31
 
+/* SPE Registers (EVPRs) */
+
 #define	evr0	0
 #define	evr1	1
 #define	evr2	2
@@ -348,3 +430,11 @@
 #define N_RSYM	64
 #define N_SLINE	68
 #define N_SO	100
+
+#define ASM_CONST(x) x
+#else
+  #define __ASM_CONST(x) x##UL
+  #define ASM_CONST(x) __ASM_CONST(x)
+#endif /*  __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
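The LOADADDR and SET_REG_TO_CONST sequences above assemble a 64-bit
value from four 16-bit pieces: lis/ori build the upper word, rldicr
shifts it into the top half, and oris/ori fill in the lower word.  A
minimal C model of what the five instructions compute (illustration
only, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint64_t set_reg_to_const(uint64_t value)
{
	uint64_t reg;

	reg  = ((value >> 48) & 0xFFFF) << 16;	/* lis    reg,...       */
	reg |= (value >> 32) & 0xFFFF;		/* ori    reg,reg,...   */
	reg <<= 32;				/* rldicr reg,reg,32,31 */
	reg |= ((value >> 16) & 0xFFFF) << 16;	/* oris   reg,reg,...   */
	reg |= value & 0xFFFF;			/* ori    reg,reg,...   */
	return reg;
}

int main(void)
{
	assert(set_reg_to_const(0x123456789abcdef0ULL) == 0x123456789abcdef0ULL);
	return 0;
}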
diff --git a/include/asm-ppc/reg.h b/include/asm-powerpc/reg.h
similarity index 97%
rename from include/asm-ppc/reg.h
rename to include/asm-powerpc/reg.h
index 73c33e3..1402a2d 100644
--- a/include/asm-ppc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -6,9 +6,9 @@
  * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
  */
 
+#ifndef _ASM_POWERPC_REGS_H
+#define _ASM_POWERPC_REGS_H
 #ifdef __KERNEL__
-#ifndef __ASM_PPC_REGS_H__
-#define __ASM_PPC_REGS_H__
 
 #include <linux/stringify.h>
 
@@ -60,7 +60,7 @@
 #define FPSCR_VX	0x20000000	/* Invalid operation summary */
 #define FPSCR_OX	0x10000000	/* Overflow exception summary */
 #define FPSCR_UX	0x08000000	/* Underflow exception summary */
-#define FPSCR_ZX	0x04000000	/* Zero-devide exception summary */
+#define FPSCR_ZX	0x04000000	/* Zero-divide exception summary */
 #define FPSCR_XX	0x02000000	/* Inexact exception summary */
 #define FPSCR_VXSNAN	0x01000000	/* Invalid op for SNaN */
 #define FPSCR_VXISI	0x00800000	/* Invalid op for Inv - Inv */
@@ -86,7 +86,14 @@
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_CTR	0x009	/* Count Register */
 #define SPRN_DABR	0x3F5	/* Data Address Breakpoint Register */
+#define   DABR_TRANSLATION	(1UL << 2)
 #define SPRN_DAR	0x013	/* Data Address Register */
+#define	SPRN_DSISR	0x012	/* Data Storage Interrupt Status Register */
+#define   DSISR_NOHPTE		0x40000000	/* no translation found */
+#define   DSISR_PROTFAULT	0x08000000	/* protection fault */
+#define   DSISR_ISSTORE		0x02000000	/* access was a store */
+#define   DSISR_DABRMATCH	0x00400000	/* hit data breakpoint */
+#define   DSISR_NOSEGMENT	0x00200000	/* STAB/SLB miss */
 #define SPRN_TBRL	0x10C	/* Time Base Read Lower Register (user, R/O) */
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
@@ -131,7 +138,6 @@
 #define DER_EBRKE	0x00000002	/* External Breakpoint Interrupt */
 #define DER_DPIE	0x00000001	/* Dev. Port Nonmaskable Request */
 #define SPRN_DMISS	0x3D0		/* Data TLB Miss Register */
-#define SPRN_DSISR	0x012	/* Data Storage Interrupt Status Register */
 #define SPRN_EAR	0x11A		/* External Address Register */
 #define SPRN_HASH1	0x3D2		/* Primary Hash Address Register */
 #define SPRN_HASH2	0x3D3		/* Secondary Hash Address Register */
@@ -436,5 +442,5 @@
 
 #define proc_trap()	asm volatile("trap")
 #endif /* __ASSEMBLY__ */
-#endif /* __ASM_PPC_REGS_H__ */
 #endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REGS_H */
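The DSISR bit definitions added above let a fault handler decode why a
data storage interrupt was taken.  A user-space illustration of the
masks (not from the patch; the function is hypothetical):

#include <stdio.h>

#define DSISR_NOHPTE	0x40000000	/* no translation found */
#define DSISR_PROTFAULT	0x08000000	/* protection fault */
#define DSISR_ISSTORE	0x02000000	/* access was a store */
#define DSISR_DABRMATCH	0x00400000	/* hit data breakpoint */

static void decode_dsisr(unsigned long dsisr)
{
	if (dsisr & DSISR_NOHPTE)
		printf("no hash PTE found for the address\n");
	if (dsisr & DSISR_PROTFAULT)
		printf("page protection violated\n");
	printf("access was a %s\n",
	       (dsisr & DSISR_ISSTORE) ? "store" : "load");
	if (dsisr & DSISR_DABRMATCH)
		printf("data address breakpoint matched\n");
}

int main(void)
{
	decode_dsisr(0x42000000);	/* no PTE, on a store */
	return 0;
}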
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-powerpc/rwsem.h
similarity index 90%
rename from include/asm-ppc64/rwsem.h
rename to include/asm-powerpc/rwsem.h
index bd5c2f0..0a5b83a 100644
--- a/include/asm-ppc64/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -1,18 +1,14 @@
+#ifndef _ASM_POWERPC_RWSEM_H
+#define _ASM_POWERPC_RWSEM_H
+
+#ifdef __KERNEL__
+
 /*
  * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
  * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _PPC64_RWSEM_H
-#define _PPC64_RWSEM_H
-
-#ifdef __KERNEL__
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
@@ -163,5 +159,5 @@
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-#endif /* __KERNEL__ */
-#endif /* _PPC_RWSEM_XADD_H */
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_RWSEM_H */
diff --git a/include/asm-ppc64/seccomp.h b/include/asm-powerpc/seccomp.h
similarity index 66%
rename from include/asm-ppc64/seccomp.h
rename to include/asm-powerpc/seccomp.h
index c130c33..1e1cfe1 100644
--- a/include/asm-ppc64/seccomp.h
+++ b/include/asm-powerpc/seccomp.h
@@ -1,11 +1,6 @@
-#ifndef _ASM_SECCOMP_H
+#ifndef _ASM_POWERPC_SECCOMP_H
 
-#include <linux/thread_info.h> /* already defines TIF_32BIT */
-
-#ifndef TIF_32BIT
-#error "unexpected TIF_32BIT on ppc64"
-#endif
-
+#include <linux/thread_info.h>
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -18,4 +13,4 @@
 #define __NR_seccomp_exit_32 __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
-#endif /* _ASM_SECCOMP_H */
+#endif	/* _ASM_POWERPC_SECCOMP_H */
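These constants exist so the seccomp core can compare syscall numbers
without knowing the architecture: a task in strict (mode 1) seccomp may
only issue read, write, exit and sigreturn, with separate numbers for
32-bit tasks on a 64-bit kernel.  A simplified sketch of the consumer
(the real check lives in kernel/seccomp.c; this is not its literal
code):

static int mode1_syscall_allowed(int nr)
{
	static const int allowed[] = {
		__NR_seccomp_read, __NR_seccomp_write,
		__NR_seccomp_exit, __NR_seccomp_sigreturn,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(allowed); i++)
		if (nr == allowed[i])
			return 1;
	return 0;	/* any other syscall is fatal in mode 1 */
}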
diff --git a/include/asm-powerpc/sections.h b/include/asm-powerpc/sections.h
new file mode 100644
index 0000000..47be2ac
--- /dev/null
+++ b/include/asm-powerpc/sections.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#ifdef __powerpc64__
+
+extern char _end[];
+
+static inline int in_kernel_text(unsigned long addr)
+{
+	if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
+		return 1;
+
+	return 0;
+}
+
+#endif
+
+#endif	/* _ASM_POWERPC_SECTIONS_H */
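in_kernel_text() reports whether an address lies between _stext and
__init_end, i.e. inside the kernel image proper.  A hypothetical
caller (not from the patch):

	/* Refuse to place a breakpoint outside the kernel image. */
	if (!in_kernel_text(bp_addr))
		return -EINVAL;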
diff --git a/include/asm-ppc64/semaphore.h b/include/asm-powerpc/semaphore.h
similarity index 95%
rename from include/asm-ppc64/semaphore.h
rename to include/asm-powerpc/semaphore.h
index aefe775..fd42fe9 100644
--- a/include/asm-ppc64/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_SEMAPHORE_H
-#define _PPC64_SEMAPHORE_H
+#ifndef _ASM_POWERPC_SEMAPHORE_H
+#define _ASM_POWERPC_SEMAPHORE_H
 
 /*
  * Remove spinlock-based RW semaphores; RW semaphore definitions are
@@ -95,4 +95,4 @@
 
 #endif /* __KERNEL__ */
 
-#endif /* !(_PPC64_SEMAPHORE_H) */
+#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-powerpc/spinlock_types.h
similarity index 79%
rename from include/asm-ppc64/spinlock_types.h
rename to include/asm-powerpc/spinlock_types.h
index a37c8ea..74236c9 100644
--- a/include/asm-ppc64/spinlock_types.h
+++ b/include/asm-powerpc/spinlock_types.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_SPINLOCK_TYPES_H
-#define __ASM_SPINLOCK_TYPES_H
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
 
 #ifndef __LINUX_SPINLOCK_TYPES_H
 # error "please don't include this file directly"
diff --git a/include/asm-ppc64/statfs.h b/include/asm-powerpc/statfs.h
similarity index 69%
rename from include/asm-ppc64/statfs.h
rename to include/asm-powerpc/statfs.h
index 3c985e5..6702402 100644
--- a/include/asm-ppc64/statfs.h
+++ b/include/asm-powerpc/statfs.h
@@ -1,12 +1,11 @@
-#ifndef _PPC64_STATFS_H
-#define _PPC64_STATFS_H
+#ifndef _ASM_POWERPC_STATFS_H
+#define _ASM_POWERPC_STATFS_H
 
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
+/* For ppc32 we just use the generic definitions; it is not so simple on ppc64 */
+
+#ifndef __powerpc64__
+#include <asm-generic/statfs.h>
+#else
 
 #ifndef __KERNEL_STRICT_NAMES
 #include <linux/types.h>
@@ -57,5 +56,5 @@
 	__u32 f_frsize;
 	__u32 f_spare[5];
 };
-
-#endif  /* _PPC64_STATFS_H */
+#endif /* ! __powerpc64__ */
+#endif
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 0000000..4660c03
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define LWSYNC	lwsync
+#else
+#    define LWSYNC	sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP	"eieio\n"
+#define ISYNC_ON_SMP	"\n\tisync"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp()	eieio()
+#define isync_on_smp()	isync()
+#else
+#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
+#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
+#endif
+
+#endif	/* _ASM_POWERPC_SYNCH_H */
+
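The eieio() wrapper above is the usual tool for ordering stores to
non-cacheable (device) memory.  A sketch of the classic pattern; the
device and its register layout are made up:

/* Illustration only: order two MMIO stores as the device sees them. */
static void start_transfer(volatile unsigned int *regs, unsigned int addr)
{
	regs[0] = addr;		/* hypothetical DMA address register */
	eieio();		/* address store reaches the device first */
	regs[1] = 1;		/* hypothetical "start" command register */
}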
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644
index 0000000..1b64879
--- /dev/null
+++ b/include/asm-powerpc/system.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SYSTEM_H
+#define _ASM_POWERPC_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/hw_irq.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ *	across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores.  Lwsync is fine for
+ * rmb(), though.  Note that lwsync is interpreted as sync by
+ * 32-bit and older 64-bit CPUs.
+ *
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends()  do { } while(0)
+
+#define set_mb(var, value)	do { var = value; mb(); } while (0)
+#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while(0)
+#endif /* CONFIG_SMP */
+
+#ifdef __KERNEL__
+struct task_struct;
+struct pt_regs;
+
+#ifdef CONFIG_DEBUGGER
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+	if (unlikely(__ ## __NAME)) \
+		return __ ## __NAME(regs); \
+	return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#ifdef CONFIG_XMON
+extern void xmon_init(int enable);
+#endif
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+extern int set_dabr(unsigned long dabr);
+extern void print_backtrace(unsigned long *);
+extern void show_regs(struct pt_regs * regs);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR()	0L
+#define _get_L3CR()	0L
+#define _set_L2CR(val)	do { } while(0)
+#define _set_L3CR(val)	do { } while(0)
+#endif
+
+extern void via_cuda_init(void);
+extern void pmac_nvram_init(void);
+extern void read_rtc_time(void);
+extern void pmac_find_display(void);
+extern void giveup_fpu(struct task_struct *);
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void enable_kernel_altivec(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
+extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern int die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+#endif /* CONFIG_BOOKE_WDT */
+
+/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
+extern unsigned char e2a(unsigned char);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+extern struct task_struct *__switch_to(struct task_struct *,
+	struct task_struct *);
+#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+				   struct thread_struct *next);
+
+extern unsigned int rtas_data;
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __inline__ unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%3,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+	: "cc", "memory");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stdcx.	%3,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+	: "cc", "memory");
+
+	return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64(ptr, x);
+#endif
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define xchg(ptr,x)							     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+  })
+
+#define tas(ptr) (xchg((ptr),1))
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG	1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"
+	: "=&r" (prev), "=m" (*p)
+	: "r" (p), "r" (old), "r" (new), "m" (*p)
+	: "cc", "memory");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"
+	: "=&r" (prev), "=m" (*p)
+	: "r" (p), "r" (old), "r" (new), "m" (*p)
+	: "cc", "memory");
+
+	return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware.  On the other hand,
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers-of-2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN   0
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYSTEM_H */
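cmpxchg() returns the value it found at the target; the update took
effect only when that equals the value the caller started from.  A
usage sketch (not part of the patch):

/* Lock-free increment built on cmpxchg(); retry if another CPU raced us. */
static void lockfree_inc(unsigned int *p)
{
	unsigned int old;

	do {
		old = *p;
	} while (cmpxchg(p, old, old + 1) != old);
}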
diff --git a/include/asm-ppc64/vga.h b/include/asm-powerpc/vga.h
similarity index 74%
rename from include/asm-ppc64/vga.h
rename to include/asm-powerpc/vga.h
index c098497..f8d350a 100644
--- a/include/asm-ppc64/vga.h
+++ b/include/asm-powerpc/vga.h
@@ -1,16 +1,14 @@
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
 /*
  *	Access to VGA videoram
  *
  *	(c) 1998 Martin Mares <mj@ucw.cz>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
 
 #include <asm/io.h>
 
@@ -42,9 +40,15 @@
 #endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
 
 extern unsigned long vgacon_remap_base;
+
+#ifdef __powerpc64__
 #define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
+#else
+#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
+#endif
 
 #define vga_readb(x) (*(x))
 #define vga_writeb(x,y) (*(y) = (x))
 
-#endif
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_POWERPC_VGA_H_ */
diff --git a/include/asm-ppc/a.out.h b/include/asm-ppc/a.out.h
deleted file mode 100644
index 8979a94..0000000
--- a/include/asm-ppc/a.out.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __PPC_A_OUT_H__
-#define __PPC_A_OUT_H__
-
-/* grabbed from the intel stuff  */
-#define STACK_TOP TASK_SIZE
-
-
-struct exec
-{
-  unsigned long a_info;		/* Use macros N_MAGIC, etc for access */
-  unsigned a_text;		/* length of text, in bytes */
-  unsigned a_data;		/* length of data, in bytes */
-  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
-  unsigned a_syms;		/* length of symbol table data in file, in bytes */
-  unsigned a_entry;		/* start address */
-  unsigned a_trsize;		/* length of relocation info for text, in bytes */
-  unsigned a_drsize;		/* length of relocation info for data, in bytes */
-};
-
-
-#define N_TRSIZE(a)	((a).a_trsize)
-#define N_DRSIZE(a)	((a).a_drsize)
-#define N_SYMSIZE(a)	((a).a_syms)
-
-
-#endif
diff --git a/include/asm-ppc/auxvec.h b/include/asm-ppc/auxvec.h
deleted file mode 100644
index 172358d..0000000
--- a/include/asm-ppc/auxvec.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __PPC_AUXVEC_H
-#define __PPC_AUXVEC_H
-
-/*
- * We need to put in some extra aux table entries to tell glibc what
- * the cache block size is, so it can use the dcbz instruction safely.
- */
-#define AT_DCACHEBSIZE		19
-#define AT_ICACHEBSIZE		20
-#define AT_UCACHEBSIZE		21
-/* A special ignored type value for PPC, for glibc compatibility.  */
-#define AT_IGNOREPPC		22
-
-#endif
diff --git a/include/asm-ppc/bug.h b/include/asm-ppc/bug.h
deleted file mode 100644
index 8b34fd6..0000000
--- a/include/asm-ppc/bug.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _PPC_BUG_H
-#define _PPC_BUG_H
-
-struct bug_entry {
-	unsigned long	bug_addr;
-	int		line;
-	const char	*file;
-	const char	*function;
-};
-
-/*
- * If this bit is set in the line number it means that the trap
- * is for WARN_ON rather than BUG or BUG_ON.
- */
-#define BUG_WARNING_TRAP	0x1000000
-
-#ifdef CONFIG_BUG
-#define BUG() do {							 \
-	__asm__ __volatile__(						 \
-		"1:	twi 31,0,0\n"					 \
-		".section __bug_table,\"a\"\n\t"			 \
-		"	.long 1b,%0,%1,%2\n"				 \
-		".previous"						 \
-		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
-} while (0)
-
-#define BUG_ON(x) do {							\
-	if (!__builtin_constant_p(x) || (x)) {				\
-		__asm__ __volatile__(					\
-			"1:	twnei %0,0\n"				\
-			".section __bug_table,\"a\"\n\t"		\
-			"	.long 1b,%1,%2,%3\n"			\
-			".previous"					\
-			: : "r" (x), "i" (__LINE__), "i" (__FILE__),	\
-			    "i" (__FUNCTION__));			\
-	}								\
-} while (0)
-
-#define WARN_ON(x) do {							\
-	if (!__builtin_constant_p(x) || (x)) {				\
-		__asm__ __volatile__(					\
-			"1:	twnei %0,0\n"				\
-			".section __bug_table,\"a\"\n\t"		\
-			"	.long 1b,%1,%2,%3\n"			\
-			".previous"					\
-			: : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP),	\
-			    "i" (__FILE__), "i" (__FUNCTION__));	\
-	}								\
-} while (0)
-
-#define HAVE_ARCH_BUG
-#define HAVE_ARCH_BUG_ON
-#define HAVE_ARCH_WARN_ON
-#endif
-
-#include <asm-generic/bug.h>
-
-#endif
diff --git a/include/asm-ppc/byteorder.h b/include/asm-ppc/byteorder.h
deleted file mode 100644
index c63c81e..0000000
--- a/include/asm-ppc/byteorder.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _PPC_BYTEORDER_H
-#define _PPC_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
-{
-	__u16 result;
-
-	__asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
-	return result;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
-{
-	__u32 result;
-
-	__asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
-	__asm__("rlwimi %0,%2,8,8,15"   : "=&r" (result) : "0" (result),    "r" (value));
-	__asm__("rlwimi %0,%2,24,0,7"   : "=&r" (result) : "0" (result),    "r" (value));
-
-	return result;
-}
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
-
-#endif /* __KERNEL__ */
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _PPC_BYTEORDER_H */
diff --git a/include/asm-ppc/cputable.h b/include/asm-ppc/cputable.h
deleted file mode 100644
index 41d8f84..0000000
--- a/include/asm-ppc/cputable.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *  include/asm-ppc/cputable.h
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ASM_PPC_CPUTABLE_H
-#define __ASM_PPC_CPUTABLE_H
-
-/* Exposed to userland CPU features */
-#define PPC_FEATURE_32			0x80000000
-#define PPC_FEATURE_64			0x40000000
-#define PPC_FEATURE_601_INSTR		0x20000000
-#define PPC_FEATURE_HAS_ALTIVEC		0x10000000
-#define PPC_FEATURE_HAS_FPU		0x08000000
-#define PPC_FEATURE_HAS_MMU		0x04000000
-#define PPC_FEATURE_HAS_4xxMAC		0x02000000
-#define PPC_FEATURE_UNIFIED_CACHE	0x01000000
-#define PPC_FEATURE_HAS_SPE		0x00800000
-#define PPC_FEATURE_HAS_EFP_SINGLE	0x00400000
-#define PPC_FEATURE_HAS_EFP_DOUBLE	0x00200000
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/* This structure can grow, it's real size is used by head.S code
- * via the mkdefs mecanism.
- */
-struct cpu_spec;
-
-typedef	void (*cpu_setup_t)(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-
-struct cpu_spec {
-	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
-	unsigned int	pvr_mask;
-	unsigned int	pvr_value;
-
-	char		*cpu_name;
-	unsigned int	cpu_features;		/* Kernel features */
-	unsigned int	cpu_user_features;	/* Userland features */
-
-	/* cache line sizes */
-	unsigned int	icache_bsize;
-	unsigned int	dcache_bsize;
-
-	/* number of performance monitor counters */
-	unsigned int	num_pmcs;
-
-	/* this is called to initialize various CPU bits like L1 cache,
-	 * BHT, SPD, etc... from head.S before branching to identify_machine
-	 */
-	cpu_setup_t	cpu_setup;
-};
-
-extern struct cpu_spec		cpu_specs[];
-extern struct cpu_spec		*cur_cpu_spec[];
-
-static inline unsigned int cpu_has_feature(unsigned int feature)
-{
-	return cur_cpu_spec[0]->cpu_features & feature;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* CPU kernel features */
-#define CPU_FTR_SPLIT_ID_CACHE		0x00000001
-#define CPU_FTR_L2CR			0x00000002
-#define CPU_FTR_SPEC7450		0x00000004
-#define CPU_FTR_ALTIVEC			0x00000008
-#define CPU_FTR_TAU			0x00000010
-#define CPU_FTR_CAN_DOZE		0x00000020
-#define CPU_FTR_USE_TB			0x00000040
-#define CPU_FTR_604_PERF_MON		0x00000080
-#define CPU_FTR_601			0x00000100
-#define CPU_FTR_HPTE_TABLE		0x00000200
-#define CPU_FTR_CAN_NAP			0x00000400
-#define CPU_FTR_L3CR			0x00000800
-#define CPU_FTR_L3_DISABLE_NAP		0x00001000
-#define CPU_FTR_NAP_DISABLE_L2_PR	0x00002000
-#define CPU_FTR_DUAL_PLL_750FX		0x00004000
-#define CPU_FTR_NO_DPM			0x00008000
-#define CPU_FTR_HAS_HIGH_BATS		0x00010000
-#define CPU_FTR_NEED_COHERENT		0x00020000
-#define CPU_FTR_NO_BTIC			0x00040000
-#define CPU_FTR_BIG_PHYS		0x00080000
-
-#ifdef __ASSEMBLY__
-
-#define BEGIN_FTR_SECTION		98:
-
-#define END_FTR_SECTION(msk, val)		\
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 2;				\
-	.long msk;				\
-	.long val;				\
-	.long 98b;				\
-	.long 99b;				\
-	.previous
-
-#else
-
-#define BEGIN_FTR_SECTION		"98:\n"
-#define END_FTR_SECTION(msk, val)		\
-"99:\n"						\
-"	.section __ftr_fixup,\"a\";\n"		\
-"	.align 2;\n"				\
-"	.long "#msk";\n"			\
-"	.long "#val";\n"			\
-"	.long 98b;\n"			        \
-"	.long 99b;\n"	 		        \
-"	.previous\n"
-
-
-#endif /* __ASSEMBLY__ */
-
-#define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
-#define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
-
-#endif /* __ASM_PPC_CPUTABLE_H */
-#endif /* __KERNEL__ */
-
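For reference, this is how the feature sections being deleted here are
consumed: each BEGIN_FTR_SECTION/END_FTR_SECTION pair emits a
__ftr_fixup record, and early boot code overwrites the guarded
instructions with nops when the CPU lacks the feature.  A simplified
sketch of that fixup pass (names are illustrative, not the kernel's
literal code):

struct ftr_fixup_entry {
	unsigned long mask;	/* .long msk */
	unsigned long value;	/* .long val */
	unsigned long start;	/* .long 98b */
	unsigned long end;	/* .long 99b */
};

static void apply_feature_fixups(unsigned long features,
				 struct ftr_fixup_entry *fcur,
				 struct ftr_fixup_entry *fend)
{
	for (; fcur < fend; fcur++) {
		unsigned int *p = (unsigned int *)fcur->start;

		if ((features & fcur->mask) == fcur->value)
			continue;		/* feature present: keep code */
		for (; p < (unsigned int *)fcur->end; p++)
			*p = 0x60000000;	/* "ori 0,0,0", i.e. nop */
	}
}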
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
deleted file mode 100644
index c25cc35..0000000
--- a/include/asm-ppc/elf.h
+++ /dev/null
@@ -1,151 +0,0 @@
-#ifndef __PPC_ELF_H
-#define __PPC_ELF_H
-
-/*
- * ELF register definitions..
- */
-#include <asm/types.h>
-#include <asm/ptrace.h>
-#include <asm/cputable.h>
-#include <asm/auxvec.h>
-
-/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE		0
-#define R_PPC_ADDR32		1	/* 32bit absolute address */
-#define R_PPC_ADDR24		2	/* 26bit address, 2 bits ignored.  */
-#define R_PPC_ADDR16		3	/* 16bit absolute address */
-#define R_PPC_ADDR16_LO		4	/* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI		5	/* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA		6	/* adjusted high 16bit */
-#define R_PPC_ADDR14		7	/* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN	8
-#define R_PPC_ADDR14_BRNTAKEN	9
-#define R_PPC_REL24		10	/* PC relative 26 bit */
-#define R_PPC_REL14		11	/* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN	12
-#define R_PPC_REL14_BRNTAKEN	13
-#define R_PPC_GOT16		14
-#define R_PPC_GOT16_LO		15
-#define R_PPC_GOT16_HI		16
-#define R_PPC_GOT16_HA		17
-#define R_PPC_PLTREL24		18
-#define R_PPC_COPY		19
-#define R_PPC_GLOB_DAT		20
-#define R_PPC_JMP_SLOT		21
-#define R_PPC_RELATIVE		22
-#define R_PPC_LOCAL24PC		23
-#define R_PPC_UADDR32		24
-#define R_PPC_UADDR16		25
-#define R_PPC_REL32		26
-#define R_PPC_PLT32		27
-#define R_PPC_PLTREL32		28
-#define R_PPC_PLT16_LO		29
-#define R_PPC_PLT16_HI		30
-#define R_PPC_PLT16_HA		31
-#define R_PPC_SDAREL16		32
-#define R_PPC_SECTOFF		33
-#define R_PPC_SECTOFF_LO	34
-#define R_PPC_SECTOFF_HI	35
-#define R_PPC_SECTOFF_HA	36
-/* Keep this the last entry.  */
-#define R_PPC_NUM		37
-
-#define ELF_NGREG	48	/* includes nip, msr, lr, etc. */
-#define ELF_NFPREG	33	/* includes fpscr */
-#define ELF_NVRREG	33	/* includes vscr */
-#define ELF_NEVRREG	34	/* includes acc (as 2) */
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_ARCH	EM_PPC
-#define ELF_CLASS	ELFCLASS32
-#define ELF_DATA	ELFDATA2MSB
-
-/* General registers */
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-/* Floating point registers */
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/* Altivec registers */
-typedef __vector128 elf_vrreg_t;
-typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
-
-#ifdef __KERNEL__
-
-struct task_struct;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-
-#define elf_check_arch(x) ((x)->e_machine == EM_PPC)
-
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE         (0x08000000)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
-
-#define ELF_CORE_COPY_REGS(gregs, regs)				\
-	memcpy((gregs), (regs), sizeof(struct pt_regs));	\
-	memset((char *)(gregs) + sizeof(struct pt_regs), 0,	\
-	       sizeof(elf_gregset_t) - sizeof(struct pt_regs));
-
-#define ELF_CORE_COPY_TASK_REGS(t, elfregs)			\
-	((t)->thread.regs?					\
-	 ({ ELF_CORE_COPY_REGS((elfregs), (t)->thread.regs); 1; }): 0)
-
-extern int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpu);
-#define ELF_CORE_COPY_FPREGS(t, fpu)	dump_task_fpu((t), (fpu))
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this cpu supports.  This could be done in userspace,
-   but it's not easy, and we've already done it here.  */
-
-#define ELF_HWCAP	(cur_cpu_spec[0]->cpu_user_features)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
-
-#define ELF_PLATFORM	(NULL)
-
-#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-
-extern int dcache_bsize;
-extern int icache_bsize;
-extern int ucache_bsize;
-
-/*
- * The requirements here are:
- * - keep the final alignment of sp (sp & 0xf)
- * - make sure the 32-bit value at the first 16 byte aligned position of
- *   AUXV is greater than 16 for glibc compatibility.
- *   AT_IGNOREPPC is used for that.
- * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
- *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
- */
-#define ARCH_DLINFO							\
-do {									\
-	/* Handle glibc compatibility. */				\
-	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
-	NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);			\
-	/* Cache size items */						\
-	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
-	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
-	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
- } while (0)
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/include/asm-ppc/hw_irq.h b/include/asm-ppc/hw_irq.h
deleted file mode 100644
index 47dc799..0000000
--- a/include/asm-ppc/hw_irq.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifdef __KERNEL__
-#ifndef _PPC_HW_IRQ_H
-#define _PPC_HW_IRQ_H
-
-#include <asm/ptrace.h>
-#include <asm/reg.h>
-
-extern void timer_interrupt(struct pt_regs *);
-
-#define INLINE_IRQS
-
-#define irqs_disabled()	((mfmsr() & MSR_EE) == 0)
-
-#ifdef INLINE_IRQS
-
-static inline void local_irq_disable(void)
-{
-	unsigned long msr;
-	msr = mfmsr();
-	mtmsr(msr & ~MSR_EE);
-	__asm__ __volatile__("": : :"memory");
-}
-
-static inline void local_irq_enable(void)
-{
-	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
-	msr = mfmsr();
-	mtmsr(msr | MSR_EE);
-}
-
-static inline void local_irq_save_ptr(unsigned long *flags)
-{
-	unsigned long msr;
-	msr = mfmsr();
-	*flags = msr;
-	mtmsr(msr & ~MSR_EE);
-	__asm__ __volatile__("": : :"memory");
-}
-
-#define local_save_flags(flags)		((flags) = mfmsr())
-#define local_irq_save(flags)		local_irq_save_ptr(&flags)
-#define local_irq_restore(flags)	mtmsr(flags)
-
-#else
-
-extern void local_irq_enable(void);
-extern void local_irq_disable(void);
-extern void local_irq_restore(unsigned long);
-extern void local_save_flags_ptr(unsigned long *);
-
-#define local_save_flags(flags) local_save_flags_ptr(&flags)
-#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
-
-#endif
-
-extern void do_lost_interrupts(unsigned long);
-
-#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
-#define unmask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->enable) irq_desc[irq].handler->enable(irq);})
-#define ack_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->ack) irq_desc[irq].handler->ack(irq);})
-
-/* Should we handle this via lost interrupts and IPIs or should we don't care like
- * we do now ? --BenH.
- */
-struct hw_interrupt_type;
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-
-
-#endif /* _PPC_HW_IRQ_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 7eb7cf6..39caf06 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -8,6 +8,7 @@
 
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/synch.h>
 #include <asm/mmu.h>
 
 #define SIO_CONFIG_RA	0x398
@@ -440,16 +441,6 @@
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
 
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
 /* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h
index bd96748..137ea0c 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-ppc/irq.h
@@ -24,6 +24,12 @@
  */
 #define ARCH_HAS_IRQ_PER_CPU
 
+#define get_irq_desc(irq) (&irq_desc[(irq)])
+
+/* Define a way to iterate across irqs. */
+#define for_each_irq(i) \
+	for ((i) = 0; (i) < NR_IRQS; ++(i))
+
 #if defined(CONFIG_40x)
 #include <asm/ibm4xx.h>
 
diff --git a/include/asm-ppc/kmap_types.h b/include/asm-ppc/kmap_types.h
deleted file mode 100644
index 6d6fc78..0000000
--- a/include/asm-ppc/kmap_types.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_PPC_SYNC_PAGE,
-	KM_PPC_SYNC_ICACHE,
-	KM_TYPE_NR
-};
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index afe26ff..4f152cc 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -164,13 +164,11 @@
 			     struct task_struct *tsk)
 {
 #ifdef CONFIG_ALTIVEC
-	asm volatile (
- BEGIN_FTR_SECTION
-	"dssall;\n"
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile ("dssall;\n"
 #ifndef CONFIG_POWER4
 	 "sync;\n" /* G4 needs a sync here, G5 apparently not */
 #endif
- END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	 : : );
 #endif /* CONFIG_ALTIVEC */
 
diff --git a/include/asm-ppc/perfmon.h b/include/asm-ppc/perfmon.h
index 5e7a89c..2ae0315 100644
--- a/include/asm-ppc/perfmon.h
+++ b/include/asm-ppc/perfmon.h
@@ -3,8 +3,8 @@
 
 extern void (*perf_irq)(struct pt_regs *);
 
-int request_perfmon_irq(void (*handler)(struct pt_regs *));
-void free_perfmon_irq(void);
+int reserve_pmc_hardware(void (*handler)(struct pt_regs *));
+void release_pmc_hardware(void);
 
 #ifdef CONFIG_FSL_BOOKE
 void init_pmc_stop(int ctr);
@@ -16,7 +16,7 @@
 void pmc_stop_ctrs(void);
 void dump_pmcs(void);
 
-extern struct op_ppc32_model op_model_fsl_booke;
+extern struct op_powerpc_model op_model_fsl_booke;
 #endif
 
 #endif /* __PERFMON_H */
diff --git a/include/asm-ppc/posix_types.h b/include/asm-ppc/posix_types.h
deleted file mode 100644
index a14a82a..0000000
--- a/include/asm-ppc/posix_types.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _PPC_POSIX_TYPES_H
-#define _PPC_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc.  Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long	__kernel_ino_t;
-typedef unsigned int	__kernel_mode_t;
-typedef unsigned short	__kernel_nlink_t;
-typedef long		__kernel_off_t;
-typedef int		__kernel_pid_t;
-typedef unsigned int	__kernel_uid_t;
-typedef unsigned int	__kernel_gid_t;
-typedef unsigned int	__kernel_size_t;
-typedef int		__kernel_ssize_t;
-typedef long		__kernel_ptrdiff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_suseconds_t;
-typedef long		__kernel_clock_t;
-typedef int		__kernel_timer_t;
-typedef int		__kernel_clockid_t;
-typedef int		__kernel_daddr_t;
-typedef char *		__kernel_caddr_t;
-typedef short             __kernel_ipc_pid_t;
-typedef unsigned short	__kernel_uid16_t;
-typedef unsigned short	__kernel_gid16_t;
-typedef unsigned int	__kernel_uid32_t;
-typedef unsigned int	__kernel_gid32_t;
-
-typedef unsigned int	__kernel_old_uid_t;
-typedef unsigned int	__kernel_old_gid_t;
-typedef unsigned int	__kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long	__kernel_loff_t;
-#endif
-
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define	__FD_SET(d, set)	((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define	__FD_CLR(d, set)	((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define	__FD_ISSET(d, set)	((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
-#define	__FD_ZERO(set)	\
-  ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
-    || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
-	unsigned long _tmp = fd / __NFDBITS;
-	unsigned long _rem = fd % __NFDBITS;
-	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
-	unsigned int *tmp = (unsigned int *)p->fds_bits;
-	int i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-			case 8:
-				tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
-				tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
-				return;
-		}
-	}
-	i = __FDSET_LONGS;
-	while (i) {
-		i--;
-		*tmp = 0;
-		tmp++;
-	}
-}
-
-#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
-#endif /* __GNUC__ */
-#endif /* _PPC_POSIX_TYPES_H */
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
deleted file mode 100644
index 3e738f4..0000000
--- a/include/asm-ppc/rwsem.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
- * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-#ifndef _PPC_RWSEM_H
-#define _PPC_RWSEM_H
-
-#ifdef __KERNEL__
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	/* XXX this should be able to be an atomic_t  -- paulus */
-	signed long		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	  LIST_HEAD_INIT((name).wait_list) \
-	  __RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name)		\
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-		smp_wmb();
-	else
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			smp_wmb();
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
-	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-		smp_wmb();
-	else
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	smp_wmb();
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
-	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	smp_wmb();
-	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-			      (atomic_t *)(&sem->count)) < 0)
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	smp_mb();
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _PPC_RWSEM_XADD_H */
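The bias constants in the rwsem implementation (kept above in
asm-powerpc/rwsem.h, deleted here from asm-ppc) encode the whole lock
state in one word: active lockers are counted in the low 16 bits
(RWSEM_ACTIVE_MASK), and the count goes negative, in units of 0x10000,
when a writer holds the lock or sleepers are queued.  A worked example
of the fast-path arithmetic (illustration only):

#include <assert.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	signed long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_BIAS;		/* __down_read, reader #1 */
	count += RWSEM_ACTIVE_BIAS;		/* __down_read, reader #2 */
	assert(count > 0);			/* both readers: fast path */

	count -= 2 * RWSEM_ACTIVE_BIAS;		/* both __up_read */
	count += RWSEM_ACTIVE_WRITE_BIAS;	/* __down_write */
	assert(count == RWSEM_ACTIVE_WRITE_BIAS); /* uncontended writer */
	return 0;
}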
diff --git a/include/asm-ppc/seccomp.h b/include/asm-ppc/seccomp.h
deleted file mode 100644
index 666c4da..0000000
--- a/include/asm-ppc/seccomp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-ppc/sections.h b/include/asm-ppc/sections.h
deleted file mode 100644
index ba8f43a..0000000
--- a/include/asm-ppc/sections.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_SECTIONS_H
-#define _PPC_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#define __pmac __attribute__ ((__section__ (".pmac.text")))
-#define __pmacdata __attribute__ ((__section__ (".pmac.data")))
-#define __pmacfunc(__argpmac) \
-	__argpmac __pmac; \
-	__argpmac
-	
-#define __prep __attribute__ ((__section__ (".prep.text")))
-#define __prepdata __attribute__ ((__section__ (".prep.data")))
-#define __prepfunc(__argprep) \
-	__argprep __prep; \
-	__argprep
-
-#define __chrp __attribute__ ((__section__ (".chrp.text")))
-#define __chrpdata __attribute__ ((__section__ (".chrp.data")))
-#define __chrpfunc(__argchrp) \
-	__argchrp __chrp; \
-	__argchrp
-
-/* this is actually just common chrp/pmac code, not OF code -- Cort */
-#define __openfirmware __attribute__ ((__section__ (".openfirmware.text")))
-#define __openfirmwaredata __attribute__ ((__section__ (".openfirmware.data")))
-#define __openfirmwarefunc(__argopenfirmware) \
-	__argopenfirmware __openfirmware; \
-	__argopenfirmware
-	
-#endif /* _PPC_SECTIONS_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/semaphore.h b/include/asm-ppc/semaphore.h
deleted file mode 100644
index 89e6e73..0000000
--- a/include/asm-ppc/semaphore.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _PPC_SEMAPHORE_H
-#define _PPC_SEMAPHORE_H
-
-/*
- * Swiped from asm-sparc/semaphore.h and modified
- * -- Cort (cort@cs.nmt.edu)
- *
- * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
- * -- Ani Joshi (ajoshi@unixbox.com)
- *
- * Remove spinlock-based RW semaphores; RW semaphore definitions are
- * now in rwsem.h and we use the generic lib/rwsem.c implementation.
- * Rework semaphores to use atomic_dec_if_positive.
- * -- Paul Mackerras (paulus@samba.org)
- */
-
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-	/*
-	 * Note that any negative value of count is equivalent to 0,
-	 * but additionally indicates that some process(es) might be
-	 * sleeping on `wait'.
-	 */
-	atomic_t count;
-	wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n)				\
-{									\
-	.count		= ATOMIC_INIT(n),				\
-	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-}
-
-#define __MUTEX_INITIALIZER(name) \
-	__SEMAPHORE_INITIALIZER(name, 1)
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-	atomic_set(&sem->count, val);
-	init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-	sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-	sema_init(sem, 0);
-}
-
-extern void __down(struct semaphore * sem);
-extern int  __down_interruptible(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-extern inline void down(struct semaphore * sem)
-{
-	might_sleep();
-
-	/*
-	 * Try to get the semaphore, take the slow path if we fail.
-	 */
-	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
-	smp_wmb();
-}
-
-extern inline int down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	if (atomic_dec_return(&sem->count) < 0)
-		ret = __down_interruptible(sem);
-	smp_wmb();
-	return ret;
-}
-
-extern inline int down_trylock(struct semaphore * sem)
-{
-	int ret;
-
-	ret = atomic_dec_if_positive(&sem->count) < 0;
-	smp_wmb();
-	return ret;
-}
-
-extern inline void up(struct semaphore * sem)
-{
-	smp_wmb();
-	if (atomic_inc_return(&sem->count) <= 0)
-		__up(sem);
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_PPC_SEMAPHORE_H) */
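[Note: the fast paths above touch only sem->count; __down()/__up() are entered
only when the count goes negative, and the smp_wmb() calls provide the ordering
drivers expect. The calling convention is unchanged by the merge; a minimal
usage sketch of this API (hypothetical caller, not part of the patch):

    static DECLARE_MUTEX(demo_sem);         /* count starts at 1 */

    static int demo_op(void)
    {
            if (down_interruptible(&demo_sem))
                    return -EINTR;          /* signal arrived while sleeping */
            /* ... serialized work ... */
            up(&demo_sem);
            return 0;
    }
]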
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index 829481c..79c1be3 100644
--- a/include/asm-ppc/smp.h
+++ b/include/asm-ppc/smp.h
@@ -45,30 +45,21 @@
 extern void __cpu_die(unsigned int cpu);
 extern void cpu_die(void) __attribute__((noreturn));
 
-#define NO_PROC_ID		0xFF            /* No processor magic marker */
-#define PROC_CHANGE_PENALTY	20
-
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern int __cpu_up(unsigned int cpu);
 
 extern int smp_hw_index[];
-#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
-
-struct klock_info_struct {
-	unsigned long kernel_flag;
-	unsigned char akp;
-};
-
-extern struct klock_info_struct klock_info;
-#define KLOCK_HELD       0xffffffff
-#define KLOCK_CLEAR      0x0
+#define hard_smp_processor_id() 	(smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu)	(smp_hw_index[(cpu)])
 
 #endif /* __ASSEMBLY__ */
 
 #else /* !(CONFIG_SMP) */
 
 static inline void cpu_die(void) { }
+#define get_hard_smp_processor_id(cpu) 0
+#define hard_smp_processor_id() 0
 
 #endif /* !(CONFIG_SMP) */
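[Note: get_hard_smp_processor_id() extends the existing logical-to-hardware
mapping to an arbitrary CPU, and the new UP stubs let callers use both macros
unconditionally. A hypothetical debug snippet, as a sketch:

    int cpu;

    for_each_online_cpu(cpu)
            printk(KERN_DEBUG "logical cpu %d is hardware cpu %d\n",
                   cpu, get_hard_smp_processor_id(cpu));
]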
 
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index 20edcf2a..5c64b75 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -9,7 +9,7 @@
  * (the type definitions are in asm/raw_spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_is_locked(x)		((x)->slock != 0)
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -31,17 +31,17 @@
 	bne-	2b\n\
 	isync"
 	: "=&r"(tmp)
-	: "r"(&lock->lock), "r"(1)
+	: "r"(&lock->slock), "r"(1)
 	: "cr0", "memory");
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__("eieio	# __raw_spin_unlock": : :"memory");
-	lock->lock = 0;
+	lock->slock = 0;
 }
 
-#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
+#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))
 
 /*
  * Read-write spinlocks, allowing multiple readers
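[Note: the rename from lock to slock follows the merged raw_spinlock_t layout,
and the trylock cast is needed because test_and_set_bit() operates on unsigned
long while slock is a narrower field. Usage is unchanged; a sketch, assuming
__RAW_SPIN_LOCK_UNLOCKED from the replacement spinlock_types.h:

    static raw_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;

    if (__raw_spin_trylock(&demo_lock)) {
            /* acquired without spinning */
            __raw_spin_unlock(&demo_lock);
    } else {
            __raw_spin_lock(&demo_lock);    /* spin until free */
            __raw_spin_unlock(&demo_lock);
    }
]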
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
deleted file mode 100644
index 7919ccc..0000000
--- a/include/asm-ppc/spinlock_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_SPINLOCK_TYPES_H
-#define __ASM_SPINLOCK_TYPES_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-typedef struct {
-	volatile unsigned long lock;
-} raw_spinlock_t;
-
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
-
-typedef struct {
-	volatile signed int lock;
-} raw_rwlock_t;
-
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
-
-#endif
diff --git a/include/asm-ppc/statfs.h b/include/asm-ppc/statfs.h
deleted file mode 100644
index 807c699..0000000
--- a/include/asm-ppc/statfs.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _PPC_STATFS_H
-#define _PPC_STATFS_H
-
-#include <asm-generic/statfs.h>
-#endif
-
-
-
diff --git a/include/asm-ppc/vga.h b/include/asm-ppc/vga.h
deleted file mode 100644
index c586473..0000000
--- a/include/asm-ppc/vga.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *	Access to VGA videoram
- *
- *	(c) 1998 Martin Mares <mj@ucw.cz>
- */
-
-#ifdef __KERNEL__
-#ifndef _LINUX_ASM_VGA_H_
-#define _LINUX_ASM_VGA_H_
-
-#include <asm/io.h>
-
-#include <linux/config.h>
-
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
-
-#define VT_BUF_HAVE_RW
-/*
- *  These are only needed for supporting VGA or MDA text mode, which use little
- *  endian byte ordering.
- *  In other cases, we can optimize by using native byte ordering and
- *  <linux/vt_buffer.h> has already done the right job for us.
- */
-
-extern inline void scr_writew(u16 val, volatile u16 *addr)
-{
-    st_le16(addr, val);
-}
-
-extern inline u16 scr_readw(volatile const u16 *addr)
-{
-    return ld_le16(addr);
-}
-
-#define VT_BUF_HAVE_MEMCPYW
-#define scr_memcpyw	memcpy
-
-#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
-
-extern unsigned long vgacon_remap_base;
-#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
-
-#endif
-#endif /* __KERNEL__ */
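[Note: the point of the deleted helpers was byte order: VGA text memory is
little-endian while the CPU runs big-endian, so st_le16()/ld_le16() swapped
each 16-bit character/attribute cell on access. What a caller relied on, as a
sketch (demo_putc and screen_pos are hypothetical):

    static void demo_putc(volatile u16 *screen_pos)
    {
            u16 cell = (0x07 << 8) | 'A';   /* attribute 0x07, character 'A' */
            scr_writew(cell, screen_pos);   /* stored little-endian in videoram */
    }
]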
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644
index 0e5f25e..0000000
--- a/include/asm-ppc64/atomic.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_PPC64_ATOMIC_H_ 
-#define _ASM_PPC64_ATOMIC_H_
-
-#include <asm/memory.h>
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v,i)		(((v)->counter) = (i))
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
-	addic.	%0,%0,-1\n\
-	blt-	2f\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	"\n\
-2:"	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define smp_mb__before_atomic_dec()     smp_mb()
-#define smp_mb__after_atomic_dec()      smp_mb()
-#define smp_mb__before_atomic_inc()     smp_mb()
-#define smp_mb__after_atomic_inc()      smp_mb()
-
-#endif /* _ASM_PPC64_ATOMIC_H_ */
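[Note: all of these helpers follow the same lwarx/stwcx. pattern: lwarx loads
the word and sets a reservation, stwcx. stores only if the reservation still
holds, and bne- retries otherwise; EIEIO_ON_SMP/ISYNC_ON_SMP add release/acquire
ordering on the *_return variants. Typical use of the API, sketched
(put_demo_object() is hypothetical):

    static atomic_t demo_refs = ATOMIC_INIT(1);

    atomic_inc(&demo_refs);                 /* take a reference */
    if (atomic_dec_and_test(&demo_refs))    /* drop it; fully ordered RMW */
            put_demo_object();              /* hypothetical release function */
]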
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index a0f8312..dbfa42e 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -42,7 +42,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * clear_bit doesn't imply a memory barrier
diff --git a/include/asm-ppc64/cputable.h b/include/asm-ppc64/cputable.h
deleted file mode 100644
index acc9b4d..0000000
--- a/include/asm-ppc64/cputable.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *  include/asm-ppc64/cputable.h
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  Modifications for ppc64:
- *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
- * 
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ASM_PPC_CPUTABLE_H
-#define __ASM_PPC_CPUTABLE_H
-
-#include <linux/config.h>
-#include <asm/page.h> /* for ASM_CONST */
-
-/* Exposed to userland CPU features - Must match ppc32 definitions */
-#define PPC_FEATURE_32			0x80000000
-#define PPC_FEATURE_64			0x40000000
-#define PPC_FEATURE_601_INSTR		0x20000000
-#define PPC_FEATURE_HAS_ALTIVEC		0x10000000
-#define PPC_FEATURE_HAS_FPU		0x08000000
-#define PPC_FEATURE_HAS_MMU		0x04000000
-#define PPC_FEATURE_HAS_4xxMAC		0x02000000
-#define PPC_FEATURE_UNIFIED_CACHE	0x01000000
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/* This structure can grow, its real size is used by head.S code
- * via the mkdefs mechanism.
- */
-struct cpu_spec;
-struct op_ppc64_model;
-
-typedef	void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
-
-struct cpu_spec {
-	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
-	unsigned int	pvr_mask;
-	unsigned int	pvr_value;
-
-	char		*cpu_name;
-	unsigned long	cpu_features;		/* Kernel features */
-	unsigned int	cpu_user_features;	/* Userland features */
-
-	/* cache line sizes */
-	unsigned int	icache_bsize;
-	unsigned int	dcache_bsize;
-
-	/* number of performance monitor counters */
-	unsigned int	num_pmcs;
-
-	/* this is called to initialize various CPU bits like L1 cache,
-	 * BHT, SPD, etc... from head.S before branching to identify_machine
-	 */
-	cpu_setup_t	cpu_setup;
-
-	/* Used by oprofile userspace to select the right counters */
-	char		*oprofile_cpu_type;
-
-	/* Processor specific oprofile operations */
-	struct op_ppc64_model *oprofile_model;
-};
-
-extern struct cpu_spec		cpu_specs[];
-extern struct cpu_spec		*cur_cpu_spec;
-
-static inline unsigned long cpu_has_feature(unsigned long feature)
-{
-	return cur_cpu_spec->cpu_features & feature;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/* CPU kernel features */
-
-/* Retain the 32b definitions for the time being - use bottom half of word */
-#define CPU_FTR_SPLIT_ID_CACHE		ASM_CONST(0x0000000000000001)
-#define CPU_FTR_L2CR			ASM_CONST(0x0000000000000002)
-#define CPU_FTR_SPEC7450		ASM_CONST(0x0000000000000004)
-#define CPU_FTR_ALTIVEC			ASM_CONST(0x0000000000000008)
-#define CPU_FTR_TAU			ASM_CONST(0x0000000000000010)
-#define CPU_FTR_CAN_DOZE		ASM_CONST(0x0000000000000020)
-#define CPU_FTR_USE_TB			ASM_CONST(0x0000000000000040)
-#define CPU_FTR_604_PERF_MON		ASM_CONST(0x0000000000000080)
-#define CPU_FTR_601			ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE		ASM_CONST(0x0000000000000200)
-#define CPU_FTR_CAN_NAP			ASM_CONST(0x0000000000000400)
-#define CPU_FTR_L3CR			ASM_CONST(0x0000000000000800)
-#define CPU_FTR_L3_DISABLE_NAP		ASM_CONST(0x0000000000001000)
-#define CPU_FTR_NAP_DISABLE_L2_PR	ASM_CONST(0x0000000000002000)
-#define CPU_FTR_DUAL_PLL_750FX		ASM_CONST(0x0000000000004000)
-
-/* Add the 64b processor unique features in the top half of the word */
-#define CPU_FTR_SLB           		ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE      		ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL         		ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE     		ASM_CONST(0x0000000800000000)
-#define CPU_FTR_NODSISRALIGN  		ASM_CONST(0x0000001000000000)
-#define CPU_FTR_IABR  			ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA  			ASM_CONST(0x0000004000000000)
-/* unused 				ASM_CONST(0x0000008000000000) */
-#define CPU_FTR_SMT  			ASM_CONST(0x0000010000000000)
-#define CPU_FTR_COHERENT_ICACHE  	ASM_CONST(0x0000020000000000)
-#define CPU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x0000040000000000)
-#define CPU_FTR_MMCRA_SIHV		ASM_CONST(0x0000080000000000)
-#define CPU_FTR_CTRL			ASM_CONST(0x0000100000000000)
-
-#ifndef __ASSEMBLY__
-
-#define COMMON_USER_PPC64	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
-			         PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
-
-#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
-                                 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
-                                 CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
-
-/* iSeries doesn't support large pages */
-#ifdef CONFIG_PPC_ISERIES
-#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE)
-#else
-#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif /* CONFIG_PPC_ISERIES */
-
-#endif /* __ASSEMBLY */
-
-#ifdef __ASSEMBLY__
-
-#define BEGIN_FTR_SECTION		98:
-
-#define END_FTR_SECTION(msk, val)		\
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 3;				\
-	.llong msk;			        \
-	.llong val;			        \
-	.llong 98b;			        \
-	.llong 99b;	 		        \
-	.previous
-
-#else
-
-#define BEGIN_FTR_SECTION		"98:\n"
-#define END_FTR_SECTION(msk, val)		\
-"99:\n"						\
-"	.section __ftr_fixup,\"a\";\n"		\
-"	.align 3;\n"				\
-"	.llong "#msk";\n"			\
-"	.llong "#val";\n"			\
-"	.llong 98b;\n"			        \
-"	.llong 99b;\n"	 		        \
-"	.previous\n"
-
-#endif /* __ASSEMBLY__ */
-
-#define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
-#define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
-
-#endif /* __ASM_PPC_CPUTABLE_H */
-#endif /* __KERNEL__ */
-
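[Note: both interfaces above presumably move to a common cputable header, like
the other files deleted in this merge. C code tests bits at runtime with
cpu_has_feature(), while BEGIN_FTR_SECTION/END_FTR_SECTION record alternative
code ranges in __ftr_fixup so boot code can patch out sequences the CPU doesn't
need. The runtime check, sketched:

    if (cpu_has_feature(CPU_FTR_ALTIVEC))
            /* safe to rely on VMX register state handling */
            flush_altivec_to_thread(current);       /* example caller */
]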
diff --git a/include/asm-ppc64/dart.h b/include/asm-ppc64/dart.h
new file mode 100644
index 0000000..cdf8a2d
--- /dev/null
+++ b/include/asm-ppc64/dart.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_DART_H
+#define _ASM_DART_H
+
+
+/* physical base of DART registers */
+#define DART_BASE        0xf8033000UL
+
+/* Offset from base to control register */
+#define DARTCNTL   0
+/* Offset from base to exception register */
+#define DARTEXCP   0x10
+/* Offset from base to TLB tag registers */
+#define DARTTAG    0x1000
+
+
+/* Control Register fields */
+
+/* base address of table (pfn) */
+#define DARTCNTL_BASE_MASK    0xfffff
+#define DARTCNTL_BASE_SHIFT   12
+
+#define DARTCNTL_FLUSHTLB     0x400
+#define DARTCNTL_ENABLE       0x200
+
+/* size of table in pages */
+#define DARTCNTL_SIZE_MASK    0x1ff
+#define DARTCNTL_SIZE_SHIFT   0
+
+
+/* DART table fields */
+
+#define DARTMAP_VALID   0x80000000
+#define DARTMAP_RPNMASK 0x00ffffff
+
+
+#define DART_PAGE_SHIFT		12
+#define DART_PAGE_SIZE		(1 << DART_PAGE_SHIFT)
+#define DART_PAGE_FACTOR	(PAGE_SHIFT - DART_PAGE_SHIFT)
+
+
+#endif
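[Note: a sketch of how the new constants compose: the control register takes
the table's physical page number and its size in DART pages, plus the enable
bit (dart_tablebase and dart_pages are hypothetical; the real programming lives
in the DART iommu driver):

    u32 cntl;

    cntl = (((dart_tablebase >> DART_PAGE_SHIFT) & DARTCNTL_BASE_MASK)
                    << DARTCNTL_BASE_SHIFT)
         | ((dart_pages & DARTCNTL_SIZE_MASK) << DARTCNTL_SIZE_SHIFT)
         | DARTCNTL_ENABLE;
]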
diff --git a/include/asm-ppc64/dma.h b/include/asm-ppc64/dma.h
deleted file mode 100644
index dfd1f69..0000000
--- a/include/asm-ppc64/dma.h
+++ /dev/null
@@ -1,329 +0,0 @@
-/* 
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- * Changes for ppc sound by Christoph Nadig
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
-
-#include <linux/config.h>
-#include <asm/io.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifndef MAX_DMA_CHANNELS
-#define MAX_DMA_CHANNELS	8
-#endif
-
-/* The maximum address that we can perform a DMA transfer to on this platform */
-/* Doesn't really apply... */
-#define MAX_DMA_ADDRESS  (~0UL)
-
-#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
-
-#define dma_outb	outb
-#define dma_inb		inb
-
-/*
- * NOTES about DMA transfers:
- *
- *  controller 1: channels 0-3, byte operations, ports 00-1F
- *  controller 2: channels 4-7, word operations, ports C0-DF
- *
- *  - ALL registers are 8 bits only, regardless of transfer size
- *  - channel 4 is not used - cascades 1 into 2.
- *  - channels 0-3 are byte - addresses/counts are for physical bytes
- *  - channels 5-7 are word - addresses/counts are for physical words
- *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- *  - transfer count loaded to registers is 1 less than actual count
- *  - controller 2 offsets are all even (2x offsets for controller 1)
- *  - page registers for 5-7 don't use data bit 0, represent 128K pages
- *  - page registers for 0-3 use bit 0, represent 64K pages
- *
- * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.  
- * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- *  Address mapping for channels 0-3:
- *
- *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *   P7  ...  P0  A7 ... A0  A7 ... A0   
- * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
- *
- *  Address mapping for channels 5-7:
- *
- *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
- *    |  ...  |   \   \   ... \  \  \  ... \  \
- *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
- *    |  ...  |     \   \   ... \  \  \  ... \
- *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0   
- * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
- *
- */
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG		0x08	/* command register (w) */
-#define DMA1_STAT_REG		0x08	/* status register (r) */
-#define DMA1_REQ_REG            0x09    /* request register (w) */
-#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
-#define DMA1_MODE_REG		0x0B	/* mode register (w) */
-#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
-#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
-#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
-#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
-
-#define DMA2_CMD_REG		0xD0	/* command register (w) */
-#define DMA2_STAT_REG		0xD0	/* status register (r) */
-#define DMA2_REQ_REG            0xD2    /* request register (w) */
-#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
-#define DMA2_MODE_REG		0xD6	/* mode register (w) */
-#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
-#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
-#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
-#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
-
-#define DMA_ADDR_0              0x00    /* DMA address registers */
-#define DMA_ADDR_1              0x02
-#define DMA_ADDR_2              0x04
-#define DMA_ADDR_3              0x06
-#define DMA_ADDR_4              0xC0
-#define DMA_ADDR_5              0xC4
-#define DMA_ADDR_6              0xC8
-#define DMA_ADDR_7              0xCC
-
-#define DMA_CNT_0               0x01    /* DMA count registers */
-#define DMA_CNT_1               0x03
-#define DMA_CNT_2               0x05
-#define DMA_CNT_3               0x07
-#define DMA_CNT_4               0xC2
-#define DMA_CNT_5               0xC6
-#define DMA_CNT_6               0xCA
-#define DMA_CNT_7               0xCE
-
-#define DMA_LO_PAGE_0              0x87    /* DMA page registers */
-#define DMA_LO_PAGE_1              0x83
-#define DMA_LO_PAGE_2              0x81
-#define DMA_LO_PAGE_3              0x82
-#define DMA_LO_PAGE_5              0x8B
-#define DMA_LO_PAGE_6              0x89
-#define DMA_LO_PAGE_7              0x8A
-
-#define DMA_HI_PAGE_0              0x487    /* DMA page registers */
-#define DMA_HI_PAGE_1              0x483
-#define DMA_HI_PAGE_2              0x481
-#define DMA_HI_PAGE_3              0x482
-#define DMA_HI_PAGE_5              0x48B
-#define DMA_HI_PAGE_6              0x489
-#define DMA_HI_PAGE_7              0x48A
-
-#define DMA1_EXT_REG               0x40B
-#define DMA2_EXT_REG               0x4D6
-
-#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT   	 0x10
-
-extern spinlock_t  dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&dma_spin_lock, flags);
-	return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
-	spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
-	unsigned char ucDmaCmd=0x00;
-
-	if (dmanr != 4)
-	{
-		dma_outb(0, DMA2_MASK_REG);  /* This may not be enabled */
-		dma_outb(ucDmaCmd, DMA2_CMD_REG);  /* Enable group */
-	}
-	if (dmanr<=3)
-	{
-		dma_outb(dmanr,  DMA1_MASK_REG);
-		dma_outb(ucDmaCmd, DMA1_CMD_REG);  /* Enable group */
-	} else
-	{
-		dma_outb(dmanr & 3,  DMA2_MASK_REG);
-	}
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
-	if (dmanr<=3)
-		dma_outb(dmanr | 4,  DMA1_MASK_REG);
-	else
-		dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while interrupts are disabled! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
-	if (dmanr<=3)
-		dma_outb(0,  DMA1_CLEAR_FF_REG);
-	else
-		dma_outb(0,  DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
-	if (dmanr<=3)
-		dma_outb(mode | dmanr,  DMA1_MODE_REG);
-	else
-		dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
-}
-
-/* Set only the page register bits of the transfer address.
- * This is used for successive transfers when we know the contents of
- * the lower 16 bits of the DMA current address register, but a 64k boundary
- * may have been crossed.
- */
-static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
-{
-	switch(dmanr) {
-		case 0:
-			dma_outb(pagenr, DMA_LO_PAGE_0);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_0);
-			break;
-		case 1:
-			dma_outb(pagenr, DMA_LO_PAGE_1);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_1);
-			break;
-		case 2:
-			dma_outb(pagenr, DMA_LO_PAGE_2);
-			dma_outb(pagenr>>8, DMA_HI_PAGE_2); 
-			break;
-		case 3:
-			dma_outb(pagenr, DMA_LO_PAGE_3);
-			dma_outb(pagenr>>8, DMA_HI_PAGE_3); 
-			break;
-	        case 5:
-		        dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
-                        dma_outb(pagenr>>8, DMA_HI_PAGE_5);
-			break;
-		case 6:
-		        dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
-			dma_outb(pagenr>>8, DMA_HI_PAGE_6);
-			break;
-		case 7:
-		        dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
-			dma_outb(pagenr>>8, DMA_HI_PAGE_7);
-		  break;
-	}
-}
-
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
-{
-	if (dmanr <= 3)  {
-	    dma_outb( phys & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-            dma_outb( (phys>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-	}  else  {
-	    dma_outb( (phys>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-	    dma_outb( (phys>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
-	}
-	set_dma_page(dmanr, phys>>16);
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
-        count--;
-	if (dmanr <= 3)  {
-	    dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-	    dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-        } else {
-	    dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-	    dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-        }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
-	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
-					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
-	/* using short to get 16-bit wrap around */
-	unsigned short count;
-
-	count = 1 + dma_inb(io_port);
-	count += dma_inb(io_port) << 8;
-	
-	return (dmanr <= 3)? count : (count<<1);
-}
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);	/* release it again */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;                                        
-#else                                                         
-#define isa_dma_bridge_buggy   (0)
-#endif
-#endif	/* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
-#endif /* _ASM_DMA_H */
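[Note: for reference, the canonical 8237 programming sequence the deleted
helpers implemented, as a sketch (chan, phys and len are hypothetical
parameters; the comments restate the constraints documented above):

    static void demo_start_dma(unsigned int chan, unsigned int phys,
                               unsigned int len)
    {
            unsigned long flags = claim_dma_lock();

            disable_dma(chan);
            clear_dma_ff(chan);                     /* known flip-flop state */
            set_dma_mode(chan, DMA_MODE_READ);      /* device -> memory */
            set_dma_addr(chan, phys);               /* physical, not virtual */
            set_dma_count(chan, len);               /* bytes; even for ch 5-7 */
            enable_dma(chan);
            release_dma_lock(flags);
    }
]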
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
index cb2640b..266b460d 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-ppc64/futex.h
@@ -5,7 +5,7 @@
 
 #include <linux/futex.h>
 #include <asm/errno.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/uaccess.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/hardirq.h b/include/asm-ppc64/hardirq.h
deleted file mode 100644
index 4ee72bb..0000000
--- a/include/asm-ppc64/hardirq.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/cache.h>
-#include <linux/preempt.h>
-
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-static inline void ack_bad_irq(int irq)
-{
-	printk(KERN_CRIT "illegal vector %d received!\n", irq);
-	BUG();
-}
-
-#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-ppc64/iSeries/iSeries_pci.h b/include/asm-ppc64/iSeries/iSeries_pci.h
index 575f611..a4d88b4 100644
--- a/include/asm-ppc64/iSeries/iSeries_pci.h
+++ b/include/asm-ppc64/iSeries/iSeries_pci.h
@@ -30,21 +30,19 @@
  * End Change Activity
  */
 
-#include <asm/iSeries/HvCallPci.h>
 #include <asm/abs_addr.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
 
 struct pci_dev;				/* For Forward Reference */
-struct iSeries_Device_Node;
 
 /*
- * Gets iSeries Bus, SubBus, DevFn using iSeries_Device_Node structure
+ * Gets iSeries Bus, SubBus, DevFn using device_node structure
  */
 
-#define ISERIES_BUS(DevPtr)	DevPtr->DsaAddr.Dsa.busNumber
-#define ISERIES_SUBBUS(DevPtr)	DevPtr->DsaAddr.Dsa.subBusNumber
-#define ISERIES_DEVICE(DevPtr)	DevPtr->DsaAddr.Dsa.deviceId
-#define ISERIES_DSA(DevPtr)	DevPtr->DsaAddr.DsaAddr
-#define ISERIES_DEVNODE(PciDev)	((struct iSeries_Device_Node *)PciDev->sysdata)
+#define ISERIES_BUS(DevPtr)	PCI_DN(DevPtr)->DsaAddr.Dsa.busNumber
+#define ISERIES_SUBBUS(DevPtr)	PCI_DN(DevPtr)->DsaAddr.Dsa.subBusNumber
+#define ISERIES_DEVNODE(PciDev)	((struct device_node *)PciDev->sysdata)
 
 #define EADsMaxAgents 7
 
@@ -67,22 +65,6 @@
 #define ISERIES_HV_ADDR(virtaddr)	\
 	(0x8000000000000000 | virt_to_abs(virtaddr))
 
-/*
- * iSeries Device Information
- */
-struct iSeries_Device_Node {
-	struct list_head Device_List;
-	struct pci_dev	*PciDev;
-	union HvDsaMap	DsaAddr;	/* Direct Select Address */
-					/* busNumber, subBusNumber, */
-					/* deviceId, barNumber */
-	int		DevFn;		/* Linux devfn */
-	int		Irq;		/* Assigned IRQ */
-	int		Flags;		/* Possible flags(disable/bist)*/
-	u8		LogicalSlot;	/* Hv Slot Index for Tces */
-	struct iommu_table *iommu_table;/* Device TCE Table */
-};
-
 extern void	iSeries_Device_Information(struct pci_dev*, int);
 
 #endif /* _ISERIES_64_PCI_H */
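[Note: with iSeries_Device_Node gone, the per-device iSeries state lives in the
generic pci_dn (see the pci-bridge.h hunk below) and is reached through the
firmware device tree node. The new access pattern, sketched (pdev is a
hypothetical struct pci_dev *):

    struct device_node *node = ISERIES_DEVNODE(pdev);
    u8 bus    = ISERIES_BUS(node);      /* PCI_DN(node)->DsaAddr.Dsa.busNumber */
    u8 subbus = ISERIES_SUBBUS(node);
]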
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index 59c958a..bd7c953 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -15,7 +15,7 @@
 #ifdef CONFIG_PPC_ISERIES 
 #include <asm/iSeries/iSeries_io.h>
 #endif  
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/delay.h>
 
 #include <asm-generic/iomap.h>
diff --git a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h
index 72dcf81..d096faf 100644
--- a/include/asm-ppc64/iommu.h
+++ b/include/asm-ppc64/iommu.h
@@ -1,5 +1,4 @@
 /*
- * iommu.h
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  * Rewrite, cleanup:
  * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
@@ -29,44 +28,11 @@
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
- * of dma (tce) space we can get.  IOMAP_MAX_ORDER = 13
+ * of dma space we can get.  IOMAP_MAX_ORDER = 13
  * allows up to 2**12 pages (4096 * 4096) = 16 MB
  */
 #define IOMAP_MAX_ORDER 13
 
-/*
- * Tces come in two formats, one for the virtual bus and a different
- * format for PCI
- */
-#define TCE_VB  0
-#define TCE_PCI 1
-
-/* tce_entry
- * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
- * abstracted so layout is irrelevant.
- */
-union tce_entry {
-   	unsigned long te_word;
-	struct {
-		unsigned int  tb_cacheBits :6;	/* Cache hash bits - not used */
-		unsigned int  tb_rsvd      :6;
-		unsigned long tb_rpn       :40;	/* Real page number */
-		unsigned int  tb_valid     :1;	/* Tce is valid (vb only) */
-		unsigned int  tb_allio     :1;	/* Tce is valid for all lps (vb only) */
-		unsigned int  tb_lpindex   :8;	/* LpIndex for user of TCE (vb only) */
-		unsigned int  tb_pciwr     :1;	/* Write allowed (pci only) */
-		unsigned int  tb_rdwr      :1;	/* Read allowed  (pci), Write allowed (vb) */
-	} te_bits;
-#define te_cacheBits te_bits.tb_cacheBits
-#define te_rpn       te_bits.tb_rpn
-#define te_valid     te_bits.tb_valid
-#define te_allio     te_bits.tb_allio
-#define te_lpindex   te_bits.tb_lpindex
-#define te_pciwr     te_bits.tb_pciwr
-#define te_rdwr      te_bits.tb_rdwr
-};
-
-
 struct iommu_table {
 	unsigned long  it_busno;     /* Bus number this table belongs to */
 	unsigned long  it_size;      /* Size of iommu table in entries */
@@ -83,6 +49,7 @@
 };
 
 struct scatterlist;
+struct device_node;
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
 
@@ -104,9 +71,8 @@
 
 #ifdef CONFIG_PPC_ISERIES
 
-struct iSeries_Device_Node;
 /* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn);
+extern void iommu_devnode_init_iSeries(struct device_node *dn);
 
 #endif /* CONFIG_PPC_ISERIES */
 
diff --git a/include/asm-ppc64/kmap_types.h b/include/asm-ppc64/kmap_types.h
deleted file mode 100644
index fd15746..0000000
--- a/include/asm-ppc64/kmap_types.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,	
-	KM_TYPE_NR
-};
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index 8027160..d35d9d3 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -56,9 +56,8 @@
 				       unsigned long vflags,
 				       unsigned long rflags);
 	long		(*hpte_remove)(unsigned long hpte_group);
-	void		(*flush_hash_range)(unsigned long context,
-					    unsigned long number,
-					    int local);
+	void		(*flush_hash_range)(unsigned long number, int local);
+
 	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644
index af53ffb..0000000
--- a/include/asm-ppc64/memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_PPC64_MEMORY_H_ 
-#define _ASM_PPC64_MEMORY_H_ 
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
-#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP	"eieio\n"
-#define ISYNC_ON_SMP	"\n\tisync"
-#define SYNC_ON_SMP	"lwsync\n\t"
-#else
-#define EIEIO_ON_SMP
-#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
-#endif
-
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
-#ifdef CONFIG_SMP
-#define eieio_on_smp()	eieio()
-#define isync_on_smp()	isync()
-#else
-#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
-#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
-#endif
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low()    asm volatile("or 31,31,31   # very low priority")
-#define HMT_low()	asm volatile("or 1,1,1		# low priority")
-#define HMT_medium_low()  asm volatile("or 6,6,6      # medium low priority")
-#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
-#define HMT_high()	asm volatile("or 3,3,3		# high priority")
-
-#define HMT_VERY_LOW    "\tor   31,31,31        # very low priority\n"
-#define HMT_LOW		"\tor	1,1,1		# low priority\n"
-#define HMT_MEDIUM_LOW  "\tor   6,6,6           # medium low priority\n"
-#define HMT_MEDIUM	"\tor	2,2,2		# medium priority\n"
-#define HMT_MEDIUM_HIGH "\tor   5,5,5           # medium high priority\n"
-#define HMT_HIGH	"\tor	3,3,3		# high priority\n"
-
-#endif
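[Note: the barrier macros move to <asm/synch.h> unchanged; their job is to make
atomic read-modify-write sequences behave as acquire/release on SMP while
costing nothing on UP. The pattern, in the style of the kernel's own
__xchg_u32 (a sketch, not this patch's code):

    static inline unsigned long demo_xchg_u32(volatile int *m, unsigned long val)
    {
            unsigned long prev;

            __asm__ __volatile__(
            EIEIO_ON_SMP                    /* order prior stores (release) */
    "1:     lwarx   %0,0,%3\n\
            stwcx.  %2,0,%3\n\
            bne-    1b"
            ISYNC_ON_SMP                    /* discard speculation (acquire) */
            : "=&r" (prev), "=m" (*m)
            : "r" (val), "r" (m)
            : "cc", "memory");

            return prev;
    }
]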
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 7bc42eb..e0505ac 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -14,6 +14,7 @@
 #define _PPC64_MMU_H_
 
 #include <linux/config.h>
+#include <asm/ppc_asm.h> /* for ASM_CONST */
 #include <asm/page.h>
 
 /*
@@ -29,7 +30,7 @@
 
 /* Location of cpu0's segment table */
 #define STAB0_PAGE	0x6
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
+#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)
 
 #ifndef __ASSEMBLY__
 extern char initial_stab[];
@@ -205,6 +206,10 @@
 			       unsigned long prpn,
 			       unsigned long vflags, unsigned long rflags);
 
+extern long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
+		unsigned long va, unsigned long prpn,
+		unsigned long vflags, unsigned long rflags);
+
 extern void stabs_alloc(void);
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a15422b..d404431 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -11,13 +11,7 @@
  */
 
 #include <linux/config.h>
-
-#ifdef __ASSEMBLY__
-  #define ASM_CONST(x) x
-#else
-  #define __ASM_CONST(x) x##UL
-  #define ASM_CONST(x) __ASM_CONST(x)
-#endif
+#include <asm/ppc_asm.h> /* for ASM_CONST */
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
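[Note: ASM_CONST now has a single home in ppc_asm.h; it exists so one
definition serves both C, where the UL suffix is required for 64-bit constants,
and assembly, where UL would be a syntax error. An illustrative expansion,
sketched:

    #define KERNELBASE      ASM_CONST(0xC000000000000000)
    /* in C:        0xC000000000000000UL   (via the x##UL pasting)
     * in assembly: 0xC000000000000000     (plain text)            */
]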
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index d899138..8ca5fce 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -4,6 +4,8 @@
 
 #include <linux/pci.h>
 
+#include <asm/iSeries/HvCallPci.h>
+
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -71,6 +73,14 @@
 	struct	iommu_table *iommu_table;	/* for phb's or bridges */
 	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
 	struct	device_node *node;	/* back-pointer to the device_node */
+#ifdef CONFIG_PPC_ISERIES
+	union HvDsaMap	DsaAddr;	/* Direct Select Address */
+					/* busNumber, subBusNumber, */
+					/* deviceId, barNumber */
+	int		Irq;		/* Assigned IRQ */
+	int		Flags;		/* Possible flags (disable/bist) */
+	u8		LogicalSlot;	/* Hv Slot Index for Tces */
+#endif
 	u32	config_space[16];	/* saved PCI config space */
 };
 
diff --git a/include/asm-ppc64/ppc_asm.h b/include/asm-ppc64/ppc_asm.h
deleted file mode 100644
index 9031d8a..0000000
--- a/include/asm-ppc64/ppc_asm.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * arch/ppc64/kernel/ppc_asm.h
- *
- * Definitions used by various bits of low-level assembly code on PowerPC.
- *
- * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_PPC_ASM_H
-#define _PPC64_PPC_ASM_H
-/*
- * Macros for storing registers into and loading registers from
- * exception frames.
- */
-#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
-#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
-#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-
-#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
-
-#define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*(n)(base)
-#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
-#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
-#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
-#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
-#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)	lfd	n,THREAD_FPR0+8*(n)(base)
-#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
-#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
-#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
-#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
-#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-
-#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n));  stvx n,b,base
-#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
-#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
-#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
-#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
-#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,b,base
-#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
-#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
-#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
-#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
-#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
-
-/* Macros to adjust thread priority for iSeries hardware multithreading */
-#define HMT_LOW		or 1,1,1
-#define HMT_MEDIUM	or 2,2,2
-#define HMT_HIGH	or 3,3,3
-
-/* Insert the high 32 bits of the MSR into what will be the new
-   MSR (via SRR1 and rfid).  This preserves the MSR.SF and MSR.ISF
-   bits. */
-
-#define FIX_SRR1(ra, rb)	\
-	mr	rb,ra;		\
-	mfmsr	ra;		\
-	rldimi	ra,rb,0,32
-
-#define CLR_TOP32(r)	rlwinm	(r),(r),0,0,31	/* clear top 32 bits */
-
-/* 
- * LOADADDR( rn, name )
- *   loads the address of 'name' into 'rn'
- *
- * LOADBASE( rn, name )
- *   loads the address (less the low 16 bits) of 'name' into 'rn'
- *   suitable for base+disp addressing
- */
-#define LOADADDR(rn,name) \
-	lis	rn,name##@highest;	\
-	ori	rn,rn,name##@higher;	\
-	rldicr	rn,rn,32,31;		\
-	oris	rn,rn,name##@h;		\
-	ori	rn,rn,name##@l
-
-#define LOADBASE(rn,name) \
-	lis	rn,name@highest;	\
-	ori	rn,rn,name@higher;	\
-	rldicr	rn,rn,32,31;		\
-	oris	rn,rn,name@ha
-
-
-#define SET_REG_TO_CONST(reg, value)	         	\
-	lis     reg,(((value)>>48)&0xFFFF);             \
-	ori     reg,reg,(((value)>>32)&0xFFFF);         \
-	rldicr  reg,reg,32,31;                          \
-	oris    reg,reg,(((value)>>16)&0xFFFF);         \
-	ori     reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label)	         	\
-	lis     reg,(label)@highest;                    \
-	ori     reg,reg,(label)@higher;                 \
-	rldicr  reg,reg,32,31;                          \
-	oris    reg,reg,(label)@h;                      \
-	ori     reg,reg,(label)@l;
-
-
-/* PPPBBB - DRENG  If KERNELBASE is always 0xC0...,
- * Then we can easily do this with one asm insn. -Peter
- */
-#define tophys(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        sub     rd,rs,rd
-
-#define tovirt(rd,rs)                           \
-        lis     rd,((KERNELBASE>>48)&0xFFFF);   \
-        rldicr  rd,rd,32,31;                    \
-        add     rd,rs,rd
-
-/* Condition Register Bit Fields */
-
-#define	cr0	0
-#define	cr1	1
-#define	cr2	2
-#define	cr3	3
-#define	cr4	4
-#define	cr5	5
-#define	cr6	6
-#define	cr7	7
-
-
-/* General Purpose Registers (GPRs) */
-
-#define	r0	0
-#define	r1	1
-#define	r2	2
-#define	r3	3
-#define	r4	4
-#define	r5	5
-#define	r6	6
-#define	r7	7
-#define	r8	8
-#define	r9	9
-#define	r10	10
-#define	r11	11
-#define	r12	12
-#define	r13	13
-#define	r14	14
-#define	r15	15
-#define	r16	16
-#define	r17	17
-#define	r18	18
-#define	r19	19
-#define	r20	20
-#define	r21	21
-#define	r22	22
-#define	r23	23
-#define	r24	24
-#define	r25	25
-#define	r26	26
-#define	r27	27
-#define	r28	28
-#define	r29	29
-#define	r30	30
-#define	r31	31
-
-
-/* Floating Point Registers (FPRs) */
-
-#define	fr0	0
-#define	fr1	1
-#define	fr2	2
-#define	fr3	3
-#define	fr4	4
-#define	fr5	5
-#define	fr6	6
-#define	fr7	7
-#define	fr8	8
-#define	fr9	9
-#define	fr10	10
-#define	fr11	11
-#define	fr12	12
-#define	fr13	13
-#define	fr14	14
-#define	fr15	15
-#define	fr16	16
-#define	fr17	17
-#define	fr18	18
-#define	fr19	19
-#define	fr20	20
-#define	fr21	21
-#define	fr22	22
-#define	fr23	23
-#define	fr24	24
-#define	fr25	25
-#define	fr26	26
-#define	fr27	27
-#define	fr28	28
-#define	fr29	29
-#define	fr30	30
-#define	fr31	31
-
-#define	vr0	0
-#define	vr1	1
-#define	vr2	2
-#define	vr3	3
-#define	vr4	4
-#define	vr5	5
-#define	vr6	6
-#define	vr7	7
-#define	vr8	8
-#define	vr9	9
-#define	vr10	10
-#define	vr11	11
-#define	vr12	12
-#define	vr13	13
-#define	vr14	14
-#define	vr15	15
-#define	vr16	16
-#define	vr17	17
-#define	vr18	18
-#define	vr19	19
-#define	vr20	20
-#define	vr21	21
-#define	vr22	22
-#define	vr23	23
-#define	vr24	24
-#define	vr25	25
-#define	vr26	26
-#define	vr27	27
-#define	vr28	28
-#define	vr29	29
-#define	vr30	30
-#define	vr31	31
-
-#endif /* _PPC64_PPC_ASM_H */
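[Note: these register names and macros are evidently consolidated into a merged
ppc_asm.h, as the new <asm/ppc_asm.h> includes in mmu.h and page.h show. For
reference, LOADADDR builds a full 64-bit address in five instructions;
LOADADDR(r4, init_task) expands to roughly the following (init_task stands in
for any symbol):

    lis     r4,init_task@highest    /* bits 63-48 */
    ori     r4,r4,init_task@higher  /* bits 47-32 */
    rldicr  r4,r4,32,31             /* shift up by 32 */
    oris    r4,r4,init_task@h       /* bits 31-16 */
    ori     r4,r4,init_task@l       /* bits 15-0  */
]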
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 4146189..6447fbe 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -291,6 +291,7 @@
 #define IC_OPEN_PIC   1
 #define IC_PPC_XIC    2
 #define IC_BPA_IIC    3
+#define IC_ISERIES    4
 
 #define XGLUE(a,b) a##b
 #define GLUE(a,b) XGLUE(a,b)
@@ -368,6 +369,14 @@
 #define mfasr()		({unsigned long rval; \
 			asm volatile("mfasr %0" : "=r" (rval)); rval;})
 
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()    asm volatile("or 31,31,31   # very low priority")
+#define HMT_low()	asm volatile("or 1,1,1		# low priority")
+#define HMT_medium_low()  asm volatile("or 6,6,6      # medium low priority")
+#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
+#define HMT_high()	asm volatile("or 3,3,3		# high priority")
+
 static inline void set_tb(unsigned int upper, unsigned int lower)
 {
 	mttbl(0);
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index c02ec1d..cf0284e 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -137,6 +137,9 @@
 	struct  kref kref;
 	unsigned long _flags;
 	void	*data;
+#ifdef CONFIG_PPC_ISERIES
+	struct list_head Device_List;
+#endif
 };
 
 extern struct device_node *of_chosen;
diff --git a/include/asm-ppc64/sections.h b/include/asm-ppc64/sections.h
deleted file mode 100644
index 308ca6f..0000000
--- a/include/asm-ppc64/sections.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _PPC64_SECTIONS_H
-#define _PPC64_SECTIONS_H
-
-extern char _end[];
-
-#include <asm-generic/sections.h>
-
-#define __pmac
-#define __pmacdata
-
-#define __prep
-#define __prepdata
-
-#define __chrp
-#define __chrpdata
-
-#define __openfirmware
-#define __openfirmwaredata
-
-
-static inline int in_kernel_text(unsigned long addr)
-{
-	if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
-		return 1;
-
-	return 0;
-}
-
-#endif
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 375015c..1fbdc9f 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * Memory barrier.
@@ -48,7 +48,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()  read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("": : :"memory")
diff --git a/include/asm-ppc64/tce.h b/include/asm-ppc64/tce.h
new file mode 100644
index 0000000..d40b6b4
--- /dev/null
+++ b/include/asm-ppc64/tce.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef _ASM_TCE_H
+#define _ASM_TCE_H
+
+/*
+ * Tces come in two formats, one for the virtual bus and a different
+ * format for PCI
+ */
+#define TCE_VB  0
+#define TCE_PCI 1
+
+/* TCE page size is 4096 bytes (1 << 12) */
+
+#define TCE_SHIFT	12
+#define TCE_PAGE_SIZE	(1 << TCE_SHIFT)
+#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)
+
+
+/* tce_entry
+ * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
+ * abstracted so layout is irrelevant.
+ */
+union tce_entry {
+   	unsigned long te_word;
+	struct {
+		unsigned int  tb_cacheBits :6;	/* Cache hash bits - not used */
+		unsigned int  tb_rsvd      :6;
+		unsigned long tb_rpn       :40;	/* Real page number */
+		unsigned int  tb_valid     :1;	/* Tce is valid (vb only) */
+		unsigned int  tb_allio     :1;	/* Tce is valid for all lps (vb only) */
+		unsigned int  tb_lpindex   :8;	/* LpIndex for user of TCE (vb only) */
+		unsigned int  tb_pciwr     :1;	/* Write allowed (pci only) */
+		unsigned int  tb_rdwr      :1;	/* Read allowed  (pci), Write allowed (vb) */
+	} te_bits;
+#define te_cacheBits te_bits.tb_cacheBits
+#define te_rpn       te_bits.tb_rpn
+#define te_valid     te_bits.tb_valid
+#define te_allio     te_bits.tb_allio
+#define te_lpindex   te_bits.tb_lpindex
+#define te_pciwr     te_bits.tb_pciwr
+#define te_rdwr      te_bits.tb_rdwr
+};
+
+
+#endif
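[Note: the union gives both a raw-word and a bitfield view of a TCE. Building a
PCI entry that allows reads and writes for a given real page looks like this
(a sketch; pfn is hypothetical and the actual construction lives in the iommu
code):

    union tce_entry tce;

    tce.te_word  = 0;
    tce.te_rpn   = pfn;     /* real page number */
    tce.te_rdwr  = 1;       /* PCI: read allowed */
    tce.te_pciwr = 1;       /* PCI: write allowed */
]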
diff --git a/include/asm-ppc64/thread_info.h b/include/asm-ppc64/thread_info.h
index 0494df6..71244d8 100644
--- a/include/asm-ppc64/thread_info.h
+++ b/include/asm-ppc64/thread_info.h
@@ -54,9 +54,9 @@
 
 /* thread information allocation */
 
-#define THREAD_ORDER		2
-#define THREAD_SIZE		(PAGE_SIZE << THREAD_ORDER)
-#define THREAD_SHIFT		(PAGE_SHIFT + THREAD_ORDER)
+#define THREAD_SHIFT		14
+#define THREAD_ORDER		(THREAD_SHIFT - PAGE_SHIFT)
+#define THREAD_SIZE		(1 << THREAD_SHIFT)
 #ifdef CONFIG_DEBUG_STACK_USAGE
 #define alloc_thread_info(tsk)					\
 	({							\
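[Note: deriving THREAD_ORDER from a fixed THREAD_SHIFT, instead of the other
way around, states the kernel stack size directly rather than deriving it from
the page size. With the usual 4K pages nothing changes:

    /* PAGE_SHIFT == 12, after this change:
     *   THREAD_SIZE  = 1 << 14   = 16384 bytes
     *   THREAD_ORDER = 14 - 12   = 2     (a 4-page stack, as before)
     */
]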
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 74271d7..626f505 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -20,10 +20,8 @@
 struct mm_struct;
 struct ppc64_tlb_batch {
 	unsigned long index;
-	unsigned long context;
 	struct mm_struct *mm;
 	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
 	unsigned int large;
 };
@@ -48,8 +46,7 @@
 #define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
 #define flush_tlb_pgtables(mm, start, end)	do { } while (0)
 
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-			    int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
 
 #endif /* _PPC64_TLBFLUSH_H */
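[Note: dropping the context argument means callers now pass the fully formed
virtual address with the VSID already folded in, matching the machdep.h and
tlb-batch changes above. The new calling convention, sketched (vsid, ea, pte
and local are hypothetical):

    unsigned long va = (vsid << 28) | (ea & 0x0fffffffUL);

    flush_hash_page(va, pte, local);
]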
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3ff7b92..51df337 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,14 +117,16 @@
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		desc->handler->ack(irq);
+		if (desc->handler->ack)
+			desc->handler->ack(irq);
 		action_ret = handle_IRQ_event(irq, regs, desc->action);
 		desc->handler->end(irq);
 		return 1;
 	}
 
 	spin_lock(&desc->lock);
-	desc->handler->ack(irq);
+	if (desc->handler->ack)
+		desc->handler->ack(irq);
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested