Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
new file mode 100644
index 0000000..fb11896
--- /dev/null
+++ b/arch/sparc64/Kconfig
@@ -0,0 +1,630 @@
+# $Id: config.in,v 1.158 2002/01/24 22:14:44 davem Exp $
+# For a description of the syntax of this configuration file,
+# see the Configure script.
+#
+
+mainmenu "Linux/UltraSPARC Kernel Configuration"
+
+config 64BIT
+	def_bool y
+
+config MMU
+	bool
+	default y
+
+config TIME_INTERPOLATION
+	bool
+	default y
+
+choice
+	prompt "Kernel page size"
+	default SPARC64_PAGE_SIZE_8KB
+
+config SPARC64_PAGE_SIZE_8KB
+	bool "8KB"
+	help
+	  This lets you select the page size of the kernel.
+
+	  8KB and 64KB work quite well, since Sparc ELF sections
+	  provide for up to 64KB alignment.
+
+	  Therefore, 512KB and 4MB are for expert hackers only.
+
+	  If you don't know what to do, choose 8KB.
+
+config SPARC64_PAGE_SIZE_64KB
+	bool "64KB"
+
+config SPARC64_PAGE_SIZE_512KB
+	bool "512KB"
+
+config SPARC64_PAGE_SIZE_4MB
+	bool "4MB"
+
+endchoice
+
+source "init/Kconfig"
+
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
+menu "General machine setup"
+
+config BBC_I2C
+	tristate "UltraSPARC-III bootbus i2c controller driver"
+	depends on PCI
+	help
+	  The BBC devices on the UltraSPARC III have two I2C controllers.  The
+	  first I2C controller connects mainly to configuration PROMs (NVRAM,
+	  CPU configuration, DIMM types, etc.).  The second I2C controller
+	  connects to environmental control devices such as fans and
+	  temperature sensors.  The second controller also connects to the
+	  smartcard reader, if present.  Say Y to enable support for these.
+
+config VT
+	bool "Virtual terminal" if EMBEDDED
+	select INPUT
+	default y
+	---help---
+	  If you say Y here, you will get support for terminal devices with
+	  display and keyboard devices. These are called "virtual" because you
+	  can run several virtual terminals (also called virtual consoles) on
+	  one physical terminal. This is rather useful, for example one
+	  virtual terminal can collect system messages and warnings, another
+	  one can be used for a text-mode user session, and a third could run
+	  an X session, all in parallel. Switching between virtual terminals
+	  is done with certain key combinations, usually Alt-<function key>.
+
+	  The setterm command ("man setterm") can be used to change the
+	  properties (such as colors or beeping) of a virtual terminal. The
+	  man page console_codes(4) ("man console_codes") contains the special
+	  character sequences that can be used to change those properties
+	  directly. The fonts used on virtual terminals can be changed with
+	  the setfont ("man setfont") command and the key bindings are defined
+	  with the loadkeys ("man loadkeys") command.
+
+	  You need at least one virtual terminal device in order to make use
+	  of your keyboard and monitor. Therefore, only people configuring an
+	  embedded system would want to say N here in order to save some
+	  memory; the only way to log into such a system is then via a serial
+	  or network connection.
+
+	  If unsure, say Y, or else you won't be able to do much with your new
+	  shiny Linux system :-)
+
+config VT_CONSOLE
+	bool "Support for console on virtual terminal" if EMBEDDED
+	depends on VT
+	default y
+	---help---
+	  The system console is the device which receives all kernel messages
+	  and warnings and which allows logins in single user mode. If you
+	  answer Y here, a virtual terminal (the device used to interact with
+	  a physical terminal) can be used as system console. This is the most
+	  common mode of operations, so you should say Y here unless you want
+	  the kernel messages be output only to a serial port (in which case
+	  you should say Y to "Console on serial port", below).
+
+	  If you do say Y here, by default the currently visible virtual
+	  terminal (/dev/tty0) will be used as system console. You can change
+	  that with a kernel command line option such as "console=tty3" which
+	  would use the third virtual terminal as system console. (Try "man
+	  bootparam" or see the documentation of your boot loader (lilo or
+	  loadlin) about how to pass options to the kernel at boot time.)
+
+	  If unsure, say Y.
+
+config HW_CONSOLE
+	bool
+	default y
+
+config SMP
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more than
+	  one CPU, say Y.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  singleprocessor machines. On a singleprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  People using multiprocessor machines who say Y here should also say
+	  Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
+	  Management" code will be disabled if you say Y here.
+
+	  See also the <file:Documentation/smp.txt>,
+	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config PREEMPT
+	bool "Preemptible Kernel"
+	help
+	  This option reduces the latency of the kernel when reacting to
+	  real-time or interactive events by allowing a low priority process to
+	  be preempted even if it is in kernel mode executing a system call.
+	  This allows applications to run more reliably even when the system is
+	  under load.
+
+	  Say Y here if you are building a kernel for a desktop, embedded
+	  or real-time system.  Say N if you are unsure.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-64)"
+	range 2 64
+	depends on SMP
+	default "32"
+
+source "drivers/cpufreq/Kconfig"
+
+config US3_FREQ
+	tristate "UltraSPARC-III CPU Frequency driver"
+	depends on CPU_FREQ
+	select CPU_FREQ_TABLE
+	help
+	  This adds the CPUFreq driver for UltraSPARC-III processors.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+
+config US2E_FREQ
+	tristate "UltraSPARC-IIe CPU Frequency driver"
+	depends on CPU_FREQ
+	select CPU_FREQ_TABLE
+	help
+	  This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+
+# Identify this as a Sparc64 build
+config SPARC64
+	bool
+	default y
+	help
+	  SPARC is a family of RISC microprocessors designed and marketed by
+	  Sun Microsystems, incorporated.  This port covers the newer 64-bit
+	  UltraSPARC.  The UltraLinux project maintains both the SPARC32 and
+	  SPARC64 ports; its web page is available at
+	  <http://www.ultralinux.org/>.
+
+# Global things across all Sun machines.
+config RWSEM_GENERIC_SPINLOCK
+	bool
+
+config RWSEM_XCHGADD_ALGORITHM
+	bool
+	default y
+
+config GENERIC_CALIBRATE_DELAY
+	bool
+	default y
+
+choice
+	prompt "SPARC64 Huge TLB Page Size"
+	depends on HUGETLB_PAGE
+	default HUGETLB_PAGE_SIZE_4MB
+
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+
+config HUGETLB_PAGE_SIZE_512K
+	depends on !SPARC64_PAGE_SIZE_4MB
+	bool "512K"
+
+config HUGETLB_PAGE_SIZE_64K
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512K
+	bool "64K"
+
+endchoice
+
+config GENERIC_ISA_DMA
+	bool
+	default y
+
+config ISA
+	bool
+	help
+	  Find out whether you have ISA slots on your motherboard.  ISA is the
+	  name of a bus system, i.e. the way the CPU talks to the other stuff
+	  inside your box.  Other bus systems are PCI, EISA, MicroChannel
+	  (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
+	  newer boards don't support it.  If you have ISA, say Y, otherwise N.
+
+config ISAPNP
+	bool
+	help
+	  Say Y here if you would like support for ISA Plug and Play devices.
+	  Some information is in <file:Documentation/isapnp.txt>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called isapnp.
+
+	  If unsure, say Y.
+
+config EISA
+	bool
+	---help---
+	  The Extended Industry Standard Architecture (EISA) bus was
+	  developed as an open alternative to the IBM MicroChannel bus.
+
+	  The EISA bus provided some of the features of the IBM MicroChannel
+	  bus while maintaining backward compatibility with cards made for
+	  the older ISA bus.  The EISA bus saw limited use between 1988 and
+	  1995 when it was made obsolete by the PCI bus.
+
+	  Say Y here if you are building a kernel for an EISA-based machine.
+
+	  Otherwise, say N.
+
+config MCA
+	bool
+	help
+	  MicroChannel Architecture is found in some IBM PS/2 machines and
+	  laptops.  It is a bus system similar to PCI or ISA. See
+	  <file:Documentation/mca.txt> (and especially the web page given
+	  there) before attempting to build an MCA bus kernel.
+
+config PCMCIA
+	tristate
+	---help---
+	  Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
+	  computer.  These are credit-card size devices such as network cards,
+	  modems or hard drives often used with laptop computers.  There are
+	  actually two varieties of these cards: the older 16 bit PCMCIA cards
+	  and the newer 32 bit CardBus cards.  If you want to use CardBus
+	  cards, you need to say Y here and also to "CardBus support" below.
+
+	  To use your PC-cards, you will need supporting software from David
+	  Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+	  for location).  Please also read the PCMCIA-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as modules, choose M here: the
+	  modules will be called pcmcia_core and ds.
+
+config SBUS
+	bool
+	default y
+
+config SBUSCHAR
+	bool
+	default y
+
+config SUN_AUXIO
+	bool
+	default y
+
+config SUN_IO
+	bool
+	default y
+
+config PCI
+	bool "PCI support"
+	help
+	  Find out whether you have a PCI motherboard. PCI is the name of a
+	  bus system, i.e. the way the CPU talks to the other stuff inside
+	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+	  VESA. If you have PCI, say Y, otherwise N.
+
+	  The PCI-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, contains valuable
+	  information about which PCI hardware does work under Linux and which
+	  doesn't.
+
+config PCI_DOMAINS
+	bool
+	default PCI
+
+config RTC
+	tristate
+	depends on PCI
+	default y
+	---help---
+	  If you say Y here and create a character special file /dev/rtc with
+	  major number 10 and minor number 135 using mknod ("man mknod"), you
+	  will get access to the real time clock (or hardware clock) built
+	  into your computer.
+
+	  Every PC has such a clock built in. It can be used to generate
+	  signals from as low as 1Hz up to 8192Hz, and can also be used
+	  as a 24 hour alarm. It reports status information via the file
+	  /proc/driver/rtc and its behaviour is set by various ioctls on
+	  /dev/rtc.
+
+	  If you run Linux on a multiprocessor machine and said Y to
+	  "Symmetric Multi Processing" above, you should say Y here to read
+	  and set the RTC in an SMP compatible fashion.
+
+	  If you think you have a use for such a device (such as periodic data
+	  sampling), then say Y here, and read <file:Documentation/rtc.txt>
+	  for details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rtc.
+
+source "drivers/pci/Kconfig"
+
+config SUN_OPENPROMFS
+	tristate "Openprom tree appears in /proc/openprom"
+	help
+	  If you say Y, the OpenPROM device tree will be available as a
+	  virtual file system, which you can mount to /proc/openprom by "mount
+	  -t openpromfs none /proc/openprom".
+
+	  To compile the /proc/openprom support as a module, choose M here: the
+	  module will be called openpromfs.  If unsure, choose M.
+
+config SPARC32_COMPAT
+	bool "Kernel support for Linux/Sparc 32bit binary compatibility"
+	help
+	  This allows you to run 32-bit binaries on your Ultra.
+	  Everybody wants this; say Y.
+
+config COMPAT
+	bool
+	depends on SPARC32_COMPAT
+	default y
+
+config UID16
+	bool
+	depends on SPARC32_COMPAT
+	default y
+
+config BINFMT_ELF32
+	tristate "Kernel support for 32-bit ELF binaries"
+	depends on SPARC32_COMPAT
+	help
+	  This allows you to run 32-bit Linux/ELF binaries on your Ultra.
+	  Everybody wants this; say Y.
+
+config BINFMT_AOUT32
+	bool "Kernel support for 32-bit (ie. SunOS) a.out binaries"
+	depends on SPARC32_COMPAT
+	help
+	  This allows you to run 32-bit a.out format binaries on your Ultra.
+	  If you want to run SunOS binaries (see SunOS binary emulation below)
+	  or other a.out binaries, say Y. If unsure, say N.
+
+source "fs/Kconfig.binfmt"
+
+config SUNOS_EMUL
+	bool "SunOS binary emulation"
+	depends on BINFMT_AOUT32
+	help
+	  This allows you to run most SunOS binaries.  If you want to do this,
+	  say Y here and place appropriate files in /usr/gnemul/sunos. See
+	  <http://www.ultralinux.org/faq.html> for more information.  If you
+	  want to run SunOS binaries on an Ultra you must also say Y to
+	  "Kernel support for 32-bit a.out binaries" above.
+
+config SOLARIS_EMUL
+	tristate "Solaris binary emulation (EXPERIMENTAL)"
+	depends on SPARC32_COMPAT && EXPERIMENTAL
+	help
+	  This is experimental code which will enable you to run (many)
+	  Solaris binaries on your SPARC Linux machine.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called solaris.
+
+source "drivers/parport/Kconfig"
+
+config PRINTER
+	tristate "Parallel printer support"
+	depends on PARPORT
+	---help---
+	  If you intend to attach a printer to the parallel port of your Linux
+	  box (as opposed to using a serial printer; if the connector at the
+	  printer has 9 or 25 holes ["female"], then it's serial), say Y.
+	  Also read the Printing-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  It is possible to share one parallel port among several devices
+	  (e.g. printer and ZIP drive) and it is safe to compile the
+	  corresponding drivers into the kernel.
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/parport.txt>.  The module will be called lp.
+
+	  If you have several parallel ports, you can specify which ports to
+	  use with the "lp" kernel command line option.  (Try "man bootparam"
+	  or see the documentation of your boot loader (lilo or loadlin) about
+	  how to pass options to the kernel at boot time.)  The syntax of the
+	  "lp" command line option can be found in <file:drivers/char/lp.c>.
+
+	  If you have more than 8 printers, you need to increase the LP_NO
+	  macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config ENVCTRL
+	tristate "SUNW, envctrl support"
+	depends on PCI
+	help
+	  Kernel support for temperature and fan monitoring on Sun SME
+	  machines.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called envctrl.
+
+config DISPLAY7SEG
+	tristate "7-Segment Display support"
+	depends on PCI
+	---help---
+	  This is the driver for the 7-segment display and LED present on
+	  Sun Microsystems CompactPCI models CP1400 and CP1500.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called display7seg.
+
+	  If you do not have a CompactPCI model CP1400 or CP1500, or
+	  another UltraSPARC-IIi-cEngine boardset with a 7-segment display,
+	  you should say N to this option.
+
+config CMDLINE_BOOL
+	bool "Default bootloader kernel arguments"
+
+config CMDLINE
+	string "Initial kernel command string"
+	depends on CMDLINE_BOOL
+	default "console=ttyS0,9600 root=/dev/sda1"
+	help
+	  Say Y here if you want to be able to pass default arguments to
+	  the kernel. This will be overridden by the bootloader, if you
+	  use one (such as SILO). This is most useful if you want to boot
+	  a kernel from TFTP, and want default options to be available
+	  without having them passed on the command line.
+
+	  NOTE: This option WILL override the PROM bootargs setting!
+
+endmenu
+
+source "drivers/base/Kconfig"
+
+source "drivers/video/Kconfig"
+
+source "drivers/serial/Kconfig"
+
+source "drivers/sbus/char/Kconfig"
+
+source "drivers/mtd/Kconfig"
+
+source "drivers/block/Kconfig"
+
+source "drivers/ide/Kconfig"
+
+source "drivers/scsi/Kconfig"
+
+source "drivers/fc4/Kconfig"
+
+source "drivers/md/Kconfig"
+
+if PCI
+source "drivers/message/fusion/Kconfig"
+endif
+
+source "drivers/ieee1394/Kconfig"
+
+source "net/Kconfig"
+
+source "drivers/isdn/Kconfig"
+
+source "drivers/telephony/Kconfig"
+
+# This one must be before the filesystem configs. -DaveM
+
+menu "Unix98 PTY support"
+
+config UNIX98_PTYS
+	bool "Unix98 PTY support"
+	---help---
+	  A pseudo terminal (PTY) is a software device consisting of two
+	  halves: a master and a slave. The slave device behaves identical to
+	  a physical terminal; the master device is used by a process to
+	  read data from and write data to the slave, thereby emulating a
+	  terminal. Typical programs for the master side are telnet servers
+	  and xterms.
+
+	  Linux has traditionally used the BSD-like names /dev/ptyxx for
+	  masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
+	  has a number of problems. The GNU C library glibc 2.1 and later,
+	  however, supports the Unix98 naming standard: in order to acquire a
+	  pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
+	  terminal is then made available to the process and the pseudo
+	  terminal slave can be accessed as /dev/pts/<number>. What was
+	  traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
+
+	  The entries in /dev/pts/ are created on the fly by a virtual
+	  file system; therefore, if you say Y here you should say Y to
+	  "/dev/pts file system for Unix98 PTYs" as well.
+
+	  If you want to say Y here, you need to have the C library glibc 2.1
+	  or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
+	  Read the instructions in <file:Documentation/Changes> pertaining to
+	  pseudo terminals. It's safe to say N.
+
+config UNIX98_PTY_COUNT
+	int "Maximum number of Unix98 PTYs in use (0-2048)"
+	depends on UNIX98_PTYS
+	default "256"
+	help
+	  The maximum number of Unix98 PTYs that can be used at any one time.
+	  The default is 256, and should be enough for desktop systems. Server
+	  machines which support incoming telnet/rlogin/ssh connections and/or
+	  serve several X terminals may want to increase this: every incoming
+	  connection and every xterm uses up one PTY.
+
+	  When not in use, each additional set of 256 PTYs occupies
+	  approximately 8 KB of kernel memory on 32-bit architectures.
+
+endmenu
+
+menu "XFree86 DRI support"
+
+config DRM
+	bool "Direct Rendering Manager (XFree86 DRI support)"
+	help
+	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+	  introduced in XFree86 4.0. If you say Y here, you need to select
+	  the module that's right for your graphics card from the list below.
+	  These modules provide support for synchronization, security, and
+	  DMA transfers. Please see <http://dri.sourceforge.net/> for more
+	  details.  You should also select and configure AGP
+	  (/dev/agpgart) support.
+
+config DRM_FFB
+	tristate "Creator/Creator3D"
+	depends on DRM && BROKEN
+	help
+	  Choose this option if you have one of Sun's Creator3D-based graphics
+	  and frame buffer cards.  Product page at
+	  <http://www.sun.com/desktop/products/Graphics/creator3d.html>.
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+	  graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card.  If M
+	  is selected, the module will be called r128.  AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+endmenu
+
+source "drivers/input/Kconfig"
+
+source "drivers/i2c/Kconfig"
+
+source "fs/Kconfig"
+
+source "drivers/media/Kconfig"
+
+source "sound/Kconfig"
+
+source "drivers/usb/Kconfig"
+
+source "drivers/infiniband/Kconfig"
+
+source "drivers/char/watchdog/Kconfig"
+
+source "arch/sparc64/oprofile/Kconfig"
+
+source "arch/sparc64/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
new file mode 100644
index 0000000..cd8d39fb
--- /dev/null
+++ b/arch/sparc64/Kconfig.debug
@@ -0,0 +1,54 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_STACK_USAGE
+	bool "Enable stack utilization instrumentation"
+	depends on DEBUG_KERNEL
+	help
+	  Enables the display of the minimum amount of free stack which each
+	  task has ever had available in the sysrq-T and sysrq-P debug output.
+
+	  This option will slow down process creation somewhat.
+
+config KPROBES
+	bool "Kprobes"
+	depends on DEBUG_KERNEL
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function.  register_kprobe() establishes
+	  a probepoint and specifies the callback.  Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+
+config DEBUG_DCFLUSH
+	bool "D-cache flush debugging"
+	depends on DEBUG_KERNEL
+
+config STACK_DEBUG
+	depends on DEBUG_KERNEL
+	bool "Stack Overflow Detection Support"
+
+config DEBUG_BOOTMEM
+	depends on DEBUG_KERNEL
+	bool "Debug BOOTMEM initialization"
+
+# We have a custom atomic_dec_and_lock() implementation but it's not
+# compatible with spinlock debugging so we need to fall back on
+# the generic version in that case.
+config HAVE_DEC_LOCK
+	bool
+	depends on SMP && !DEBUG_SPINLOCK
+	default y
+
+config MCOUNT
+	bool
+	depends on STACK_DEBUG
+	default y
+
+config FRAME_POINTER
+	bool
+	depends on MCOUNT
+	default y
+
+endmenu
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
new file mode 100644
index 0000000..43fe382
--- /dev/null
+++ b/arch/sparc64/Makefile
@@ -0,0 +1,83 @@
+# $Id: Makefile,v 1.52 2002/02/09 19:49:31 davem Exp $
+# sparc64/Makefile
+#
+# Makefile for the architecture dependent flags and dependencies on the
+# 64-bit Sparc.
+#
+# Copyright (C) 1996,1998 David S. Miller (davem@caip.rutgers.edu)
+# Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+#
+
+CHECKFLAGS	+= -D__sparc__ -D__sparc_v9__ -m64
+
+CPPFLAGS_vmlinux.lds += -Usparc
+
+CC		:= $(shell if $(CC) -m64 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo $(CC); else echo sparc64-linux-gcc; fi )
+
+NEW_GCC := $(call cc-option-yn, -m64 -mcmodel=medlow)
+NEW_GAS := $(shell if $(LD) -V 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
+UNDECLARED_REGS := $(shell if $(CC) -c -x assembler /dev/null -Wa,--help | grep undeclared-regs > /dev/null; then echo y; else echo n; fi; )
+INLINE_LIMIT := $(call cc-option-yn, -m64 -finline-limit=100000)
+
+export NEW_GCC
+
+ifneq ($(NEW_GAS),y)
+AS		= sparc64-linux-as
+LD		= sparc64-linux-ld
+NM		= sparc64-linux-nm
+AR		= sparc64-linux-ar
+RANLIB		= sparc64-linux-ranlib
+else
+AS		:= $(AS) -64
+LDFLAGS		:= -m elf64_sparc
+endif
+
+ifneq ($(UNDECLARED_REGS),y)
+CC_UNDECL	=
+else
+CC_UNDECL	= -Wa,--undeclared-regs
+AS		:= $(AS) --undeclared-regs
+endif
+
+ifneq ($(NEW_GCC),y)
+  CFLAGS := $(CFLAGS) -pipe -mno-fpu -mtune=ultrasparc -mmedlow \
+	    -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare
+else
+  CFLAGS := $(CFLAGS) -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
+	    -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
+	    $(CC_UNDECL)
+  AFLAGS += -m64 -mcpu=ultrasparc $(CC_UNDECL)
+endif
+
+ifeq ($(INLINE_LIMIT),y)
+  CFLAGS := $(CFLAGS) -finline-limit=100000
+endif
+
+ifeq ($(CONFIG_MCOUNT),y)
+  CFLAGS := $(CFLAGS) -pg
+endif
+
+head-y := arch/sparc64/kernel/head.o arch/sparc64/kernel/init_task.o
+
+core-y				+= arch/sparc64/kernel/ arch/sparc64/mm/
+core-$(CONFIG_SOLARIS_EMUL)	+= arch/sparc64/solaris/
+core-y				+= arch/sparc64/math-emu/
+libs-y				+= arch/sparc64/prom/ arch/sparc64/lib/
+
+# FIXME: is drivers- right?
+drivers-$(CONFIG_OPROFILE)	+= arch/sparc64/oprofile/
+
+boot := arch/sparc64/boot
+
+image tftpboot.img vmlinux.aout: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+  echo  '* vmlinux       - Standard sparc64 kernel'
+  echo  '  vmlinux.aout  - a.out kernel for sparc64'
+  echo  '  tftpboot.img  - Image prepared for tftp'
+endef
+
diff --git a/arch/sparc64/boot/Makefile b/arch/sparc64/boot/Makefile
new file mode 100644
index 0000000..6968a6d
--- /dev/null
+++ b/arch/sparc64/boot/Makefile
@@ -0,0 +1,34 @@
+# $Id: Makefile,v 1.4 1997/12/15 20:08:56 ecd Exp $
+# Makefile for the Sparc64 boot stuff.
+#
+# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+# Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+
+ROOT_IMG	:= /usr/src/root.img
+ELFTOAOUT	:= elftoaout
+
+hostprogs-y	:= piggyback
+targets		:= image tftpboot.img vmlinux.aout
+
+quiet_cmd_elftoaout = ELF2AOUT $@
+      cmd_elftoaout = $(ELFTOAOUT) vmlinux -o $@
+quiet_cmd_piggy     = PIGGY   $@
+      cmd_piggy     = $(obj)/piggyback $@ System.map $(ROOT_IMG)
+quiet_cmd_strip     = STRIP   $@
+      cmd_strip     = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start vmlinux -o $@
+
+
+# Actual linking
+$(obj)/image: vmlinux FORCE
+	$(call if_changed,strip)
+	@echo '  kernel: $@ is ready'
+
+$(obj)/tftpboot.img: vmlinux $(obj)/piggyback System.map $(ROOT_IMG) FORCE
+	$(call if_changed,elftoaout)
+	$(call if_changed,piggy)
+	@echo '  kernel: $@ is ready'
+
+$(obj)/vmlinux.aout: vmlinux FORCE
+	$(call if_changed,elftoaout)
+	@echo '  kernel: $@ is ready'
+
diff --git a/arch/sparc64/boot/piggyback.c b/arch/sparc64/boot/piggyback.c
new file mode 100644
index 0000000..36f9074
--- /dev/null
+++ b/arch/sparc64/boot/piggyback.c
@@ -0,0 +1,109 @@
+/* $Id: piggyback.c,v 1.2 2000/09/19 14:34:39 anton Exp $
+   Simple utility to make a single-image install kernel with initial ramdisk
+   for Sparc64 tftpbooting without need to set up nfs.
+   
+   Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+   
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+   
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
+   
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image only
+   usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */
+
+void die(char *str)
+{
+	perror (str);	/* report the failing file/operation via errno */
+	exit(1);	/* abort the image build on any error */
+}
+
+int main(int argc,char **argv)
+{
+	char buffer [1024], *q, *r;
+	unsigned int i, j, k, start, end, offset;	/* NOTE(review): start/end stay uninitialized if _start/_end are missing from System.map — confirm inputs */
+	FILE *map;
+	struct stat s;
+	int image, tail;
+	
+	if (stat (argv[3], &s) < 0) die (argv[3]);	/* argv[3] = gzipped ramdisk; only its size is needed here */
+	map = fopen (argv[2], "r");	/* argv[2] = System.map */
+	if (!map) die(argv[2]);
+	while (fgets (buffer, 1024, map)) {
+		if (!strcmp (buffer + 19, "_start\n"))	/* assumes fixed-width map lines: 16-digit address + type, symbol at column 19 — TODO confirm format */
+		start = strtoul (buffer + 8, NULL, 16);
+		else if (!strcmp (buffer + 19, "_end\n"))
+		end = strtoul (buffer + 8, NULL, 16);
+	}
+	fclose (map);
+	if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);	/* argv[1] = kernel image, patched in place */
+	if (read(image,buffer,512) != 512) die(argv[1]);
+	if (!memcmp (buffer, "\177ELF", 4)) {
+		unsigned int *p = (unsigned int *)(buffer + *(unsigned int *)(buffer + 28));	/* presumably ELF32 layout: word at +28 is e_phoff — confirm */
+		
+		i = p[1] + *(unsigned int *)(buffer + 24) - p[2];	/* p_offset + e_entry - p_vaddr: file offset of entry point */
+		if (lseek(image,i,0) < 0) die("lseek");
+		if (read(image,buffer,512) != 512) die(argv[1]);
+		j = 0;
+	} else if (*(unsigned int *)buffer == 0x01030107) {	/* a.out magic as produced by elftoaout */
+		i = j = 32;	/* skip the 32-byte a.out exec header */
+	} else {
+		fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
+		exit(1);
+	}
+	k = i;	/* remember file offset of the kernel text start */
+	if (j == 32 && buffer[40] == 'H' && buffer[41] == 'd' && buffer[42] == 'r' && buffer[43] == 'S') {	/* "HdrS" boot-header signature directly after the a.out header */
+		offset = 40 + 10;
+	} else {
+		i += ((*(unsigned short *)(buffer + j + 2))<<2) - 512;	/* presumably follows a branch displacement (<<2 = word offset) to locate the header — confirm */
+		if (lseek(image,i,0) < 0) die("lseek");
+		if (read(image,buffer,1024) != 1024) die(argv[1]);
+		for (q = buffer, r = q + 512; q < r; q += 4) {	/* word-aligned scan for the "HdrS" signature */
+			if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
+				break;
+		}
+		if (q == r) {
+			fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
+			exit(1);
+		}
+		offset = i + (q - buffer) + 10;	/* +10 skips past the signature/version words to the ramdisk fields */
+	}
+	if (lseek(image, offset, 0) < 0) die ("lseek");
+	*(unsigned *)buffer = 0;
+	*(unsigned *)(buffer + 4) = 0x01000000;	/* presumably the ramdisk load address field — verify against boot header layout */
+	*(unsigned *)(buffer + 8) = ((end + 32 + 8191) & ~8191);	/* ramdisk file offset: _end + a.out header, rounded up to 8K */
+	*(unsigned *)(buffer + 12) = s.st_size;	/* ramdisk size in bytes */
+	if (write(image,buffer+2,14) != 14) die (argv[1]);
+	if (lseek(image, 4, 0) < 0) die ("lseek");
+	*(unsigned *)buffer = ((end + 32 + 8191) & ~8191) - (start & ~0x3fffffUL) + s.st_size;	/* enlarged a.out size field covering kernel + ramdisk */
+	*(unsigned *)(buffer + 4) = 0;
+	*(unsigned *)(buffer + 8) = 0;
+	if (write(image,buffer,12) != 12) die (argv[1]);
+	if (lseek(image, k - start + ((end + 32 + 8191) & ~8191), 0) < 0) die ("lseek");
+	if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
+	while ((i = read (tail,buffer,1024)) > 0)	/* NOTE(review): i is unsigned, so a read() error (-1) wraps positive and escapes this check — verify */
+		if (write(image,buffer,i) != i) die (argv[1]);
+	if (close(image) < 0) die("close");
+	if (close(tail) < 0) die("close");
+    	return 0;
+}
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
new file mode 100644
index 0000000..46a6ad6
--- /dev/null
+++ b/arch/sparc64/defconfig
@@ -0,0 +1,1951 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.11
+# Sun Mar  6 20:47:29 2005
+#
+CONFIG_64BIT=y
+CONFIG_MMU=y
+CONFIG_TIME_INTERPOLATION=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+CONFIG_SYSVIPC_COMPAT=y
+
+#
+# General machine setup
+#
+CONFIG_BBC_I2C=m
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_SMP=y
+# CONFIG_PREEMPT is not set
+CONFIG_NR_CPUS=4
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_US3_FREQ=m
+CONFIG_US2E_FREQ=m
+CONFIG_SPARC64=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_HUGETLB_PAGE_SIZE_4MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_SBUS=y
+CONFIG_SBUSCHAR=y
+CONFIG_SUN_AUXIO=y
+CONFIG_SUN_IO=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_RTC=y
+# CONFIG_PCI_LEGACY_PROC is not set
+# CONFIG_PCI_NAMES is not set
+CONFIG_SUN_OPENPROMFS=m
+CONFIG_SPARC32_COMPAT=y
+CONFIG_COMPAT=y
+CONFIG_UID16=y
+CONFIG_BINFMT_ELF32=y
+# CONFIG_BINFMT_AOUT32 is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=m
+CONFIG_SOLARIS_EMUL=m
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
+# CONFIG_PARPORT_SUNBPP is not set
+# CONFIG_PARPORT_OTHER is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PRINTER=m
+CONFIG_ENVCTRL=m
+CONFIG_DISPLAY7SEG=m
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=m
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_CIRRUS is not set
+CONFIG_FB_PM2=y
+# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_BW2 is not set
+# CONFIG_FB_CG3 is not set
+CONFIG_FB_CG6=y
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON_OLD is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+CONFIG_FB_ATY=y
+CONFIG_FB_ATY_CT=y
+# CONFIG_FB_ATY_GENERIC_LCD is not set
+# CONFIG_FB_ATY_XL_INIT is not set
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+CONFIG_FB_SBUS=y
+CONFIG_FB_FFB=y
+# CONFIG_FB_TCX is not set
+# CONFIG_FB_CG14 is not set
+# CONFIG_FB_P9100 is not set
+# CONFIG_FB_LEO is not set
+# CONFIG_FB_PCI is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+# CONFIG_PROM_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_SUN8x16=y
+# CONFIG_FONT_SUN12x22 is not set
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_SUN_CLUT224=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Serial drivers
+#
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SUNCORE=y
+CONFIG_SERIAL_SUNZILOG=y
+CONFIG_SERIAL_SUNZILOG_CONSOLE=y
+CONFIG_SERIAL_SUNSU=y
+CONFIG_SERIAL_SUNSU_CONSOLE=y
+CONFIG_SERIAL_SUNSAB=m
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+
+#
+# Misc Linux/SPARC drivers
+#
+CONFIG_SUN_OPENPROMIO=m
+CONFIG_SUN_MOSTEK_RTC=y
+CONFIG_OBP_FLASH=m
+# CONFIG_SUN_BPP is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_UB=m
+# CONFIG_BLK_DEV_RAM is not set
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_ATA_OVER_ETH=m
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+# CONFIG_BLK_DEV_GENERIC is not set
+CONFIG_BLK_DEV_OPTI621=m
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+CONFIG_IDEDMA_ONLYDISK=y
+# CONFIG_BLK_DEV_AEC62XX is not set
+CONFIG_BLK_DEV_ALI15X3=y
+# CONFIG_WDC_ALI15X3 is not set
+CONFIG_BLK_DEV_AMD74XX=m
+CONFIG_BLK_DEV_CMD64X=m
+CONFIG_BLK_DEV_TRIFLEX=m
+CONFIG_BLK_DEV_CY82C693=m
+CONFIG_BLK_DEV_CS5520=m
+CONFIG_BLK_DEV_CS5530=m
+CONFIG_BLK_DEV_HPT34X=m
+# CONFIG_HPT34X_AUTODMA is not set
+CONFIG_BLK_DEV_HPT366=m
+CONFIG_BLK_DEV_SC1200=m
+CONFIG_BLK_DEV_PIIX=m
+CONFIG_BLK_DEV_NS87415=m
+CONFIG_BLK_DEV_PDC202XX_OLD=m
+# CONFIG_PDC202XX_BURST is not set
+CONFIG_BLK_DEV_PDC202XX_NEW=m
+# CONFIG_PDC202XX_FORCE is not set
+CONFIG_BLK_DEV_SVWKS=m
+CONFIG_BLK_DEV_SIIMAGE=m
+CONFIG_BLK_DEV_SLC90E66=m
+CONFIG_BLK_DEV_TRM290=m
+CONFIG_BLK_DEV_VIA82CXXX=m
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC79XX=m
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+# CONFIG_AIC79XX_DEBUG_ENABLE is not set
+CONFIG_AIC79XX_DEBUG_MASK=0
+# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_SCSI_SATA=y
+CONFIG_SCSI_SATA_AHCI=m
+CONFIG_SCSI_SATA_SVW=m
+CONFIG_SCSI_ATA_PIIX=m
+CONFIG_SCSI_SATA_NV=m
+CONFIG_SCSI_SATA_PROMISE=m
+CONFIG_SCSI_SATA_QSTOR=m
+CONFIG_SCSI_SATA_SX4=m
+CONFIG_SCSI_SATA_SIL=m
+CONFIG_SCSI_SATA_SIS=m
+CONFIG_SCSI_SATA_ULI=m
+CONFIG_SCSI_SATA_VIA=m
+CONFIG_SCSI_SATA_VITESSE=m
+CONFIG_SCSI_DMX3191D=m
+CONFIG_SCSI_EATA_PIO=m
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+CONFIG_SCSI_IPS=m
+CONFIG_SCSI_INITIO=m
+CONFIG_SCSI_INIA100=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+# CONFIG_SCSI_IZIP_EPP16 is not set
+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPR is not set
+CONFIG_SCSI_QLOGIC_ISP=m
+CONFIG_SCSI_QLOGIC_FC=y
+CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLOGICPTI=m
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+CONFIG_SCSI_DC395x=m
+# CONFIG_SCSI_DC390T is not set
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_SUNESP=y
+
+#
+# Fibre Channel support
+#
+CONFIG_FC4=m
+
+#
+# FC4 drivers
+#
+CONFIG_FC4_SOC=m
+CONFIG_FC4_SOCAL=m
+
+#
+# FC4 targets
+#
+CONFIG_SCSI_PLUTO=m
+CONFIG_SCSI_FCAL=m
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=m
+CONFIG_FUSION_MAX_SGE=40
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_IEEE1394=m
+
+#
+# Subsystem Options
+#
+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
+CONFIG_IEEE1394_OUI_DB=y
+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
+
+#
+# Device Drivers
+#
+CONFIG_IEEE1394_PCILYNX=m
+CONFIG_IEEE1394_OHCI1394=m
+
+#
+# Protocol Drivers
+#
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_SBP2=m
+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_DV1394=m
+CONFIG_IEEE1394_RAWIO=m
+CONFIG_IEEE1394_CMP=m
+CONFIG_IEEE1394_AMDTP=m
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_TUNNEL=y
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_CT_ACCT=y
+CONFIG_IP_NF_CONNTRACK_MARK=y
+CONFIG_IP_NF_CT_PROTO_SCTP=m
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_PHYSDEV=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+CONFIG_IP_NF_MATCH_SCTP=m
+CONFIG_IP_NF_MATCH_COMMENT=m
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_PHYSDEV=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+
+#
+# DECnet: Netfilter Configuration
+#
+CONFIG_DECNET_NF_GRABULATOR=m
+
+#
+# Bridge: Netfilter Configuration
+#
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_DECNET=m
+CONFIG_DECNET_ROUTER=y
+CONFIG_DECNET_ROUTE_FWMARK=y
+CONFIG_LLC=m
+CONFIG_LLC2=m
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_NET_DIVERT=y
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+# CONFIG_NET_SCH_CLK_JIFFIES is not set
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+CONFIG_NET_SCH_CLK_CPU=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=y
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_NET_CLS_IND=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_PEDIT=m
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_HAMRADIO=y
+
+#
+# Packet Radio protocols
+#
+CONFIG_AX25=m
+CONFIG_AX25_DAMA_SLAVE=y
+CONFIG_NETROM=m
+CONFIG_ROSE=m
+
+#
+# AX.25 network device drivers
+#
+# CONFIG_BPQETHER is not set
+# CONFIG_BAYCOM_SER_FDX is not set
+# CONFIG_BAYCOM_SER_HDX is not set
+# CONFIG_BAYCOM_PAR is not set
+# CONFIG_YAM is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_ULTRA=y
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+# CONFIG_IRTTY_SIR is not set
+
+#
+# Dongle support
+#
+
+#
+# Old SIR device drivers
+#
+
+#
+# Old Serial dongle support
+#
+
+#
+# FIR device drivers
+#
+# CONFIG_USB_IRDA is not set
+CONFIG_SIGMATEL_FIR=m
+# CONFIG_VLSI_FIR is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_BCSP_TXCRC=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+# CONFIG_ETHERTAP is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_SUNLANCE=y
+CONFIG_HAPPYMEAL=y
+CONFIG_SUNBMAC=m
+CONFIG_SUNQE=m
+CONFIG_SUNGEM=y
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=m
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+CONFIG_TULIP_NAPI=y
+CONFIG_TULIP_NAPI_HW_MITIGATION=y
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+# CONFIG_DM9102 is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+# CONFIG_AMD8111_ETH is not set
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ADAPTEC_STARFIRE_NAPI=y
+CONFIG_B44=m
+CONFIG_FORCEDETH=m
+CONFIG_DGRS=m
+CONFIG_EEPRO100=m
+CONFIG_E100=m
+CONFIG_FEALNX=m
+CONFIG_NATSEMI=m
+CONFIG_NE2K_PCI=m
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_SIS900=m
+CONFIG_EPIC100=m
+CONFIG_SUNDANCE=m
+CONFIG_SUNDANCE_MMIO=y
+CONFIG_VIA_RHINE=m
+# CONFIG_VIA_RHINE_MMIO is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_DL2K=m
+CONFIG_E1000=m
+CONFIG_E1000_NAPI=y
+CONFIG_MYRI_SBUS=m
+CONFIG_NS83820=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_R8169=m
+CONFIG_R8169_NAPI=y
+CONFIG_R8169_VLAN=y
+CONFIG_SK98LIN=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_TIGON3=m
+
+#
+# Ethernet (10000 Mbit)
+#
+CONFIG_IXGB=m
+CONFIG_IXGB_NAPI=y
+CONFIG_S2IO=m
+CONFIG_S2IO_NAPI=y
+CONFIG_2BUFF_MODE=y
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+CONFIG_HERMES=m
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_PCI_HERMES=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_PRISM54=m
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+
+#
+# ATM drivers
+#
+CONFIG_ATM_TCP=m
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+CONFIG_ATM_FORE200E_MAYBE=m
+CONFIG_ATM_FORE200E_PCA=y
+CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
+CONFIG_ATM_FORE200E_SBA=y
+CONFIG_ATM_FORE200E_SBA_DEFAULT_FW=y
+CONFIG_ATM_FORE200E_USE_TASKLET=y
+CONFIG_ATM_FORE200E_TX_RETRY=16
+CONFIG_ATM_FORE200E_DEBUG=0
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_FDDI=y
+# CONFIG_DEFXX is not set
+CONFIG_SKFP=m
+CONFIG_HIPPI=y
+# CONFIG_ROADRUNNER is not set
+CONFIG_PLIP=m
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_PPPOATM=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_NET_FC=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+CONFIG_ISDN=m
+
+#
+# Old ISDN4Linux
+#
+# CONFIG_ISDN_I4L is not set
+
+#
+# CAPI subsystem
+#
+CONFIG_ISDN_CAPI=m
+# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
+# CONFIG_ISDN_CAPI_MIDDLEWARE is not set
+CONFIG_ISDN_CAPI_CAPI20=m
+
+#
+# CAPI hardware drivers
+#
+
+#
+# Active AVM cards
+#
+CONFIG_CAPI_AVM=y
+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
+CONFIG_ISDN_DRV_AVMB1_C4=m
+
+#
+# Active Eicon DIVA Server cards
+#
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DIVAS=m
+CONFIG_ISDN_DIVAS_BRIPCI=y
+CONFIG_ISDN_DIVAS_PRIPCI=y
+CONFIG_ISDN_DIVAS_DIVACAPI=m
+CONFIG_ISDN_DIVAS_USERIDI=m
+CONFIG_ISDN_DIVAS_MAINT=m
+
+#
+# Telephony Support
+#
+CONFIG_PHONE=m
+CONFIG_PHONE_IXJ=m
+
+#
+# Unix98 PTY support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# XFree86 DRI support
+#
+CONFIG_DRM=y
+CONFIG_DRM_TDFX=m
+# CONFIG_DRM_R128 is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+CONFIG_GAMEPORT=m
+CONFIG_SOUND_GAMEPORT=m
+# CONFIG_GAMEPORT_NS558 is not set
+# CONFIG_GAMEPORT_L4 is not set
+# CONFIG_GAMEPORT_EMU10K1 is not set
+# CONFIG_GAMEPORT_VORTEX is not set
+# CONFIG_GAMEPORT_FM801 is not set
+CONFIG_GAMEPORT_CS461X=m
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_SUNKBD=y
+CONFIG_KEYBOARD_LKKBD=m
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_SPARCSPKR=y
+# CONFIG_INPUT_UINPUT is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_ALGOPCF=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+# CONFIG_I2C_AMD756_S4882 is not set
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_I810=m
+CONFIG_I2C_ISA=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_PROSAVAGE=m
+CONFIG_I2C_SAVAGE4=m
+CONFIG_SCx200_ACB=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_STUB=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_VOODOO3=m
+CONFIG_I2C_PCA_ISA=m
+
+#
+# Hardware Sensors Chip support
+#
+CONFIG_I2C_SENSOR=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_FSCHER=m
+CONFIG_SENSORS_FSCPOS=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83627HF=m
+
+#
+# Other I2C Chip support
+#
+CONFIG_SENSORS_EEPROM=m
+CONFIG_SENSORS_PCF8574=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_RTC8564=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+
+#
+# XFS support
+#
+CONFIG_XFS_FS=m
+CONFIG_XFS_EXPORT=y
+# CONFIG_XFS_RT is not set
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_SECURITY=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+# CONFIG_QUOTA is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_TMPFS_SECURITY=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+CONFIG_ADFS_FS=m
+# CONFIG_ADFS_FS_RW is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+# CONFIG_BEFS_DEBUG is not set
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_UFS_FS_WRITE=y
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_RPCSEC_GSS_SPKM3=m
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+CONFIG_AFS_FS=m
+CONFIG_RXRPC=m
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_SUN_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Multimedia devices
+#
+CONFIG_VIDEO_DEV=y
+
+#
+# Video For Linux
+#
+
+#
+# Video Adapters
+#
+CONFIG_VIDEO_BT848=m
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_W9966=m
+CONFIG_VIDEO_CPIA=m
+CONFIG_VIDEO_CPIA_PP=m
+CONFIG_VIDEO_CPIA_USB=m
+CONFIG_VIDEO_SAA5246A=m
+CONFIG_VIDEO_SAA5249=m
+CONFIG_TUNER_3036=m
+# CONFIG_VIDEO_STRADIS is not set
+# CONFIG_VIDEO_ZORAN is not set
+# CONFIG_VIDEO_SAA7134 is not set
+CONFIG_VIDEO_MXB=m
+CONFIG_VIDEO_DPC=m
+CONFIG_VIDEO_HEXIUM_ORION=m
+CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_OVCAMCHIP=m
+
+#
+# Radio Adapters
+#
+CONFIG_RADIO_GEMTEK_PCI=m
+CONFIG_RADIO_MAXIRADIO=m
+CONFIG_RADIO_MAESTRO=m
+
+#
+# Digital Video Broadcasting Devices
+#
+CONFIG_DVB=y
+CONFIG_DVB_CORE=m
+
+#
+# Supported SAA7146 based PCI Adapters
+#
+CONFIG_DVB_AV7110=m
+# CONFIG_DVB_AV7110_OSD is not set
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+
+#
+# Supported USB Adapters
+#
+# CONFIG_DVB_TTUSB_BUDGET is not set
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_DVB_DIBUSB=m
+# CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES is not set
+CONFIG_DVB_DIBCOM_DEBUG=y
+CONFIG_DVB_CINERGYT2=m
+# CONFIG_DVB_CINERGYT2_TUNING is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+CONFIG_DVB_B2C2_SKYSTAR=m
+CONFIG_DVB_B2C2_USB=m
+
+#
+# Supported BT878 Adapters
+#
+CONFIG_DVB_BT8XX=m
+
+#
+# Supported DVB Frontends
+#
+
+#
+# Customise DVB Frontends
+#
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA80XX=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_VES1X93=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_ATMEL_AT76C651=m
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial DTV) frontends
+#
+CONFIG_DVB_NXT2002=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_VIDEO_VIDEOBUF=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_BUF=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_IR=m
+CONFIG_VIDEO_TVEEPROM=m
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_BIT32_EMUL=m
+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# PCI devices
+#
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_ALI5451=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_AZT3328=m
+CONFIG_SND_BT87X=m
+# CONFIG_SND_BT87X_OVERCLOCK is not set
+CONFIG_SND_CS46XX=m
+# CONFIG_SND_CS46XX_NEW_DSP is not set
+CONFIG_SND_CS4281=m
+CONFIG_SND_EMU10K1=m
+CONFIG_SND_EMU10K1X=m
+CONFIG_SND_CA0106=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_HDSP is not set
+CONFIG_SND_TRIDENT=m
+CONFIG_SND_YMFPCI=m
+CONFIG_SND_ALS4000=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_ES1938=m
+CONFIG_SND_ES1968=m
+CONFIG_SND_MAESTRO3=m
+CONFIG_SND_FM801=m
+CONFIG_SND_FM801_TEA575X=m
+CONFIG_SND_ICE1712=m
+# CONFIG_SND_ICE1724 is not set
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_SONICVIBES=m
+# CONFIG_SND_VIA82XX is not set
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VX222=m
+
+#
+# USB devices
+#
+# CONFIG_SND_USB_AUDIO is not set
+
+#
+# ALSA Sparc devices
+#
+CONFIG_SND_SUN_AMD7930=m
+CONFIG_SND_SUN_CS4231=m
+
+#
+# USB support
+#
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_SL811_HCD=m
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_AUDIO is not set
+
+#
+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
+#
+# CONFIG_USB_MIDI is not set
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_RW_DETECT=y
+# CONFIG_USB_STORAGE_DATAFAB is not set
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_HP8200e=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+# CONFIG_HID_FF is not set
+CONFIG_USB_HIDDEV=y
+# CONFIG_USB_AIPTEK is not set
+CONFIG_USB_WACOM=m
+CONFIG_USB_KBTAB=m
+# CONFIG_USB_POWERMATE is not set
+CONFIG_USB_MTOUCH=m
+CONFIG_USB_EGALAX=m
+# CONFIG_USB_XPAD is not set
+CONFIG_USB_ATI_REMOTE=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_SE401 is not set
+CONFIG_USB_SN9C102=m
+# CONFIG_USB_STV680 is not set
+CONFIG_USB_W9968CF=m
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+
+#
+# USB Host-to-Host Cables
+#
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_GENESYS=y
+CONFIG_USB_NET1080=y
+CONFIG_USB_PL2301=y
+CONFIG_USB_KC2190=y
+
+#
+# Intelligent USB Devices/Gadgets
+#
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_ZAURUS=y
+CONFIG_USB_CDCETHER=y
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_AX8817X=y
+
+#
+# USB port drivers
+#
+CONFIG_USB_USS720=m
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+# CONFIG_USB_SERIAL_VISOR is not set
+CONFIG_USB_SERIAL_IPAQ=m
+# CONFIG_USB_SERIAL_IR is not set
+CONFIG_USB_SERIAL_EDGEPORT=m
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
+CONFIG_USB_SERIAL_KLSI=m
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+# CONFIG_USB_SERIAL_SAFE is not set
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_AUERSWALD=m
+CONFIG_USB_RIO500=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_PHIDGETKIT=m
+CONFIG_USB_PHIDGETSERVO=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_TEST=m
+
+#
+# USB ATM/DSL drivers
+#
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# InfiniBand support
+#
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_MTHCA=m
+# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
+CONFIG_INFINIBAND_IPOIB=m
+# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_WATCHDOG_CP1XXX=m
+CONFIG_WATCHDOG_RIO=m
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+CONFIG_WDT_501_PCI=y
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+
+#
+# Profiling support
+#
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_KPROBES=y
+# CONFIG_DEBUG_DCFLUSH is not set
+# CONFIG_STACK_DEBUG is not set
+# CONFIG_DEBUG_BOOTMEM is not set
+CONFIG_HAVE_DEC_LOCK=y
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
new file mode 100644
index 0000000..093281b
--- /dev/null
+++ b/arch/sparc64/kernel/Makefile
@@ -0,0 +1,44 @@
+# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
+# Makefile for the linux kernel.
+#
+
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+extra-y		:= head.o init_task.o vmlinux.lds
+
+obj-y		:= process.o setup.o cpu.o idprom.o \
+		   traps.o devices.o auxio.o \
+		   irq.o ptrace.o time.o sys_sparc.o signal.o \
+		   unaligned.o central.o pci.o starfire.o semaphore.o \
+		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
+
+obj-$(CONFIG_PCI)	 += ebus.o isa.o pci_common.o pci_iommu.o \
+			    pci_psycho.o pci_sabre.o pci_schizo.o
+obj-$(CONFIG_SMP)	 += smp.o trampoline.o
+obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
+obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
+obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
+obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+
+# SunOS and Solaris emulation both pull in the same 32-bit SunOS
+# syscall and ioctl translation objects.
+ifdef CONFIG_SUNOS_EMUL
+  obj-y += sys_sunos32.o sunos_ioctl32.o
+else
+  ifdef CONFIG_SOLARIS_EMUL
+    obj-y += sys_sunos32.o sunos_ioctl32.o
+  endif
+endif
+
+# Pick the code-model flag form understood by the compiler in use
+# (NEW_GCC is presumably set by the including arch Makefile -- verify).
+# NOTE(review): CMODEL_CFLAG is assigned but not referenced anywhere in
+# this Makefile; confirm a parent Makefile consumes it.
+ifneq ($(NEW_GCC),y)
+  CMODEL_CFLAG := -mmedlow
+else
+  CMODEL_CFLAG := -m64 -mcmodel=medlow
+endif
+
+head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
+	etrap.S rtrap.S winfixup.S entry.S
+
+CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
new file mode 100644
index 0000000..a0716cc
--- /dev/null
+++ b/arch/sparc64/kernel/auxio.c
@@ -0,0 +1,152 @@
+/* auxio.c: Probing for the Sparc AUXIO register at boot time.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/oplib.h>
+#include <asm/io.h>
+#include <asm/sbus.h>
+#include <asm/ebus.h>
+#include <asm/auxio.h>
+
+/* This cannot be static, as it is referenced in entry.S */
+void __iomem *auxio_register = NULL;
+
+enum auxio_type {
+	AUXIO_TYPE_NODEV,
+	AUXIO_TYPE_SBUS,
+	AUXIO_TYPE_EBUS
+};
+
+static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
+static DEFINE_SPINLOCK(auxio_lock);
+
+/* Read-modify-write the byte-wide SBUS AUXIO register under
+ * auxio_lock: set the bits in 'bits_on', then clear those in
+ * 'bits_off'.  The AUX1 mask bits are always cleared before the
+ * write-back.  Silently a no-op if auxio_probe() never mapped the
+ * register.
+ */
+static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
+{
+	if (auxio_register) {
+		unsigned char regval;
+		unsigned long flags;
+		unsigned char newval;
+
+		spin_lock_irqsave(&auxio_lock, flags);
+
+		regval =  sbus_readb(auxio_register);
+		newval =  regval | bits_on;
+		newval &= ~bits_off;
+		newval &= ~AUXIO_AUX1_MASK;
+		sbus_writeb(newval, auxio_register);
+		
+		spin_unlock_irqrestore(&auxio_lock, flags);
+	}
+}
+
+/* EBUS (PCIO) variant of the above: the register is 32 bits wide and
+ * accessed with readl()/writel().  Only the low byte is modified, and
+ * no AUX1 masking is applied here (that mask is SBUS-specific).
+ */
+static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
+{
+	if (auxio_register) {
+		unsigned char regval;
+		unsigned long flags;
+		unsigned char newval;
+
+		spin_lock_irqsave(&auxio_lock, flags);
+
+		regval =  (u8)readl(auxio_register);
+		newval =  regval | bits_on;
+		newval &= ~bits_off;
+		writel((u32)newval, auxio_register);
+
+		spin_unlock_irqrestore(&auxio_lock, flags);
+	}
+}
+
+/* Turn the LED bit on or off via the EBUS (PCIO) AUXIO register. */
+static inline void __auxio_ebus_set_led(int on)
+{
+	(on) ? __auxio_ebus_set(AUXIO_PCIO_LED, 0) :
+		__auxio_ebus_set(0, AUXIO_PCIO_LED) ;
+}
+
+/* Turn the LED bit on or off via the SBUS AUXIO register. */
+static inline void __auxio_sbus_set_led(int on)
+{
+	(on) ? __auxio_sbus_set(AUXIO_AUX1_LED, 0) :
+		__auxio_sbus_set(0, AUXIO_AUX1_LED) ;
+}
+
+/* Set (on != 0) or clear the system LED, dispatching on whichever
+ * AUXIO flavor auxio_probe() recorded.  A no-op when no AUXIO device
+ * was found (AUXIO_TYPE_NODEV).
+ */
+void auxio_set_led(int on)
+{
+	switch(auxio_devtype) {
+	case AUXIO_TYPE_SBUS:
+		__auxio_sbus_set_led(on);
+		break;
+	case AUXIO_TYPE_EBUS:
+		__auxio_ebus_set_led(on);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Toggle the AUXIO_AUX1_LTE bit; this bit exists only on SBUS AUXIO. */
+static inline void __auxio_sbus_set_lte(int on)
+{
+	(on) ? __auxio_sbus_set(AUXIO_AUX1_LTE, 0) : 
+		__auxio_sbus_set(0, AUXIO_AUX1_LTE) ;
+}
+
+/* Set or clear the LTE bit.  Only the SBUS AUXIO register carries it,
+ * so the EBUS case deliberately falls through to the no-op default.
+ */
+void auxio_set_lte(int on)
+{
+	switch(auxio_devtype) {
+	case AUXIO_TYPE_SBUS:
+		__auxio_sbus_set_lte(on);
+		break;
+	case AUXIO_TYPE_EBUS:
+		/* FALL-THROUGH */
+	default:
+		break;
+	}
+}
+
+/* Boot-time probe: scan the SBUS device tree for an "auxio" node and,
+ * failing that (on CONFIG_PCI builds), the EBUS tree.  On a match the
+ * register is mapped and the device flavor recorded for the dispatch
+ * routines above.
+ *
+ * NOTE(review): this relies on 'sdev' being NULL when the scan loops
+ * complete without a match -- confirm against for_each_sbusdev().
+ * NOTE(review): the auxio_set_led(AUXIO_LED_ON) call sits inside the
+ * CONFIG_PCI block, so non-PCI kernels never light the LED at boot;
+ * this looks unintentional -- confirm.
+ */
+void __init auxio_probe(void)
+{
+        struct sbus_bus *sbus;
+        struct sbus_dev *sdev = NULL;
+
+        for_each_sbus(sbus) {
+                for_each_sbusdev(sdev, sbus) {
+                        if(!strcmp(sdev->prom_name, "auxio"))
+				goto found_sdev;
+                }
+        }
+
+found_sdev:
+	if (sdev) {
+		auxio_devtype  = AUXIO_TYPE_SBUS;
+		auxio_register = sbus_ioremap(&sdev->resource[0], 0,
+					      sdev->reg_addrs[0].reg_size,
+					      "auxiliaryIO");
+	}
+#ifdef CONFIG_PCI
+	else {
+		struct linux_ebus *ebus;
+		struct linux_ebus_device *edev = NULL;
+
+		for_each_ebus(ebus) {
+			for_each_ebusdev(edev, ebus) {
+				if (!strcmp(edev->prom_name, "auxio"))
+					goto ebus_done;
+			}
+		}
+	ebus_done:
+		if (edev) {
+			auxio_devtype  = AUXIO_TYPE_EBUS;
+			auxio_register =
+				ioremap(edev->resource[0].start, sizeof(u32));
+		}
+	}
+	auxio_set_led(AUXIO_LED_ON);
+#endif
+}
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
new file mode 100644
index 0000000..b2854ef
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -0,0 +1,424 @@
+/*
+ *  linux/fs/binfmt_aout.c
+ *
+ *  Copyright (C) 1991, 1992, 1996  Linus Torvalds
+ *
+ *  Hacked a bit by DaveM to make it work with 32-bit SunOS
+ *  binaries on the sparc64 port.
+ */
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
+static int load_aout32_library(struct file*);
+static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *file);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+static struct linux_binfmt aout32_format = {
+	NULL, THIS_MODULE, load_aout32_binary, load_aout32_library, aout32_core_dump,
+	PAGE_SIZE
+};
+
+/* Page-align [start, end) and extend the process heap over it with
+ * do_brk(), serialized by mmap_sem.  Empty or inverted ranges are
+ * ignored, as is the do_brk() return value.
+ */
+static void set_brk(unsigned long start, unsigned long end)
+{
+	start = PAGE_ALIGN(start);
+	end = PAGE_ALIGN(end);
+	if (end <= start)
+		return;
+	down_write(&current->mm->mmap_sem);
+	do_brk(start, end - start);
+	up_write(&current->mm->mmap_sem);
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * macros to write out all the necessary info.
+ */
+
+/* Append 'nr' bytes at 'addr' to the core file; nonzero iff the whole
+ * amount was written.
+ */
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+#define DUMP_WRITE(addr, nr)	\
+	if (!dump_write(file, (void *)(addr), (nr))) \
+		goto end_coredump;
+
+#define DUMP_SEEK(offset) \
+if (file->f_op->llseek) { \
+	if (file->f_op->llseek(file,(offset),0) != (offset)) \
+ 		goto end_coredump; \
+} else file->f_pos = (offset)
+
+/*
+ * Routine writes a core dump image in the current directory.
+ * Currently only a stub-function.
+ *
+ * Note that setuid/setgid files won't make a core-dump if the uid/gid
+ * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
+ * field, which also makes sure the core-dumps won't be recursive if the
+ * dumping of the process results in another error..
+ */
+
+/* Write the a.out-style core image: the 'struct user' header first,
+ * then the data and stack areas (each clamped by RLIMIT_CORE and
+ * dropped entirely if unreadable from userspace), and finally the raw
+ * task_struct.  Returns 1 unconditionally once dumping begins, even if
+ * a later write fails (the DUMP_WRITE macros just jump to
+ * end_coredump).
+ */
+static int aout32_core_dump(long signr, struct pt_regs *regs, struct file *file)
+{
+	mm_segment_t fs;
+	int has_dumped = 0;
+	unsigned long dump_start, dump_size;
+	struct user dump;
+#       define START_DATA(u)    (u.u_tsize)
+#       define START_STACK(u)   ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))
+
+	/* Kernel addresses are passed to dump_write(), so writes must
+	 * run under KERNEL_DS; the original fs is restored on exit. */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	has_dumped = 1;
+	current->flags |= PF_DUMPCORE;
+       	strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
+	dump.signal = signr;
+	dump_thread(regs, &dump);
+
+/* If the size of the dump file exceeds the rlimit, then see what would happen
+   if we wrote the stack, but not the data area.  */
+	if ((dump.u_dsize+dump.u_ssize) >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_dsize = 0;
+
+/* Make sure we have enough room to write the stack and data areas. */
+	if ((dump.u_ssize) >
+	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
+		dump.u_ssize = 0;
+
+/* make sure we actually have a data and stack area to dump */
+	set_fs(USER_DS);
+	if (!access_ok(VERIFY_READ, (void __user *) START_DATA(dump), dump.u_dsize))
+		dump.u_dsize = 0;
+	if (!access_ok(VERIFY_READ, (void __user *) START_STACK(dump), dump.u_ssize))
+		dump.u_ssize = 0;
+
+	set_fs(KERNEL_DS);
+/* struct user */
+	DUMP_WRITE(&dump,sizeof(dump));
+/* now we start writing out the user space info */
+	set_fs(USER_DS);
+/* Dump the data area */
+	if (dump.u_dsize != 0) {
+		dump_start = START_DATA(dump);
+		dump_size = dump.u_dsize;
+		DUMP_WRITE(dump_start,dump_size);
+	}
+/* Now prepare to dump the stack area */
+	if (dump.u_ssize != 0) {
+		dump_start = START_STACK(dump);
+		dump_size = dump.u_ssize;
+		DUMP_WRITE(dump_start,dump_size);
+	}
+/* Finally dump the task struct.  Not be used by gdb, but could be useful */
+	set_fs(KERNEL_DS);
+	DUMP_WRITE(current,sizeof(*current));
+end_coredump:
+	set_fs(fs);
+	return has_dumped;
+}
+
+/*
+ * create_aout32_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
+
+static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bprm)
+{
+	u32 __user *argv;
+	u32 __user *envp;
+	u32 __user *sp;
+	int argc = bprm->argc;
+	int envc = bprm->envc;
+
+	sp = (u32 __user *)((-(unsigned long)sizeof(char *))&(unsigned long)p);
+
+	/* This imposes the proper stack alignment for a new process. */
+	sp = (u32 __user *) (((unsigned long) sp) & ~7);
+	/* argc slot + the argv/envp NULL terminators give argc+envc+3
+	 * u32 slots; pad by one when that count is odd so sp stays
+	 * 8-byte aligned after the pushes below. */
+	if ((envc+argc+3)&1)
+		--sp;
+
+	sp -= envc+1;
+	envp = sp;
+	sp -= argc+1;
+	argv = sp;
+	put_user(argc,--sp);
+	current->mm->arg_start = (unsigned long) p;
+	while (argc-->0) {
+		char c;
+		put_user(((u32)(unsigned long)(p)),argv++);
+		do {
+			get_user(c,p++);
+		} while (c);
+	}
+	/* NOTE(review): put_user(NULL, ...) stores a pointer constant
+	 * into a u32 slot (here and for envp below); later kernels
+	 * spell this put_user(0, ...) -- confirm it is benign here. */
+	put_user(NULL,argv);
+	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+	while (envc-->0) {
+		char c;
+		put_user(((u32)(unsigned long)(p)),envp++);
+		do {
+			get_user(c,p++);
+		} while (c);
+	}
+	put_user(NULL,envp);
+	current->mm->env_end = (unsigned long) p;
+	return sp;
+}
+
+/*
+ * These are the functions used to load a.out style executables and shared
+ * libraries.  There is no binary dependent code anywhere else.
+ */
+
+/* Load a 32-bit SunOS a.out executable: validate the exec header,
+ * flush the old image, bring text/data in by the method the magic
+ * number dictates (do_brk + read for NMAGIC/OMAGIC or non-mmapable
+ * files, do_mmap otherwise), set up the brk area and the 32-bit user
+ * stack, then start the new thread.  Returns 0 on success or a
+ * negative errno; failures past the "point of no return" also kill
+ * the process with SIGKILL.
+ */
+static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	struct exec ex;
+	unsigned long error;
+	unsigned long fd_offset;
+	unsigned long rlim;
+	unsigned long orig_thr_flags;
+	int retval;
+
+	ex = *((struct exec *) bprm->buf);		/* exec-header */
+	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
+	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
+	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
+	    bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+		return -ENOEXEC;
+	}
+
+	fd_offset = N_TXTOFF(ex);
+
+	/* Check initial limits. This avoids letting people circumvent
+	 * size limits imposed on them by creating programs with large
+	 * arrays in the data or bss.
+	 */
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim >= RLIM_INFINITY)
+		rlim = ~0;
+	if (ex.a_data + ex.a_bss > rlim)
+		return -ENOMEM;
+
+	/* Flush all traces of the currently running executable */
+	retval = flush_old_exec(bprm);
+	if (retval)
+		return retval;
+
+	/* OK, This is the point of no return */
+	set_personality(PER_SUNOS);
+
+	current->mm->end_code = ex.a_text +
+		(current->mm->start_code = N_TXTADDR(ex));
+	current->mm->end_data = ex.a_data +
+		(current->mm->start_data = N_DATADDR(ex));
+	current->mm->brk = ex.a_bss +
+		(current->mm->start_brk = N_BSSADDR(ex));
+
+	set_mm_counter(current->mm, rss, 0);
+	current->mm->mmap = NULL;
+	compute_creds(bprm);
+ 	current->flags &= ~PF_FORKNOEXEC;
+	if (N_MAGIC(ex) == NMAGIC) {
+		loff_t pos = fd_offset;
+		/* Fuck me plenty... */
+		down_write(&current->mm->mmap_sem);	
+		error = do_brk(N_TXTADDR(ex), ex.a_text);
+		up_write(&current->mm->mmap_sem);
+		bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+			  ex.a_text, &pos);
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(N_DATADDR(ex), ex.a_data);
+		up_write(&current->mm->mmap_sem);
+		bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
+			  ex.a_data, &pos);
+		goto beyond_if;
+	}
+
+	if (N_MAGIC(ex) == OMAGIC) {
+		loff_t pos = fd_offset;
+		down_write(&current->mm->mmap_sem);
+		do_brk(N_TXTADDR(ex) & PAGE_MASK,
+			ex.a_text+ex.a_data + PAGE_SIZE - 1);
+		up_write(&current->mm->mmap_sem);
+		bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+			  ex.a_text+ex.a_data, &pos);
+	} else {
+		static unsigned long error_time;
+		/* Rate-limited (once per 5s) warning for misaligned
+		 * ZMAGIC/QMAGIC images. */
+		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
+		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ)
+		{
+			printk(KERN_NOTICE "executable not page aligned\n");
+			error_time = jiffies;
+		}
+
+		if (!bprm->file->f_op->mmap) {
+			loff_t pos = fd_offset;
+			down_write(&current->mm->mmap_sem);
+			do_brk(0, ex.a_text+ex.a_data);
+			up_write(&current->mm->mmap_sem);
+			bprm->file->f_op->read(bprm->file,
+				  (char __user *)N_TXTADDR(ex),
+				  ex.a_text+ex.a_data, &pos);
+			goto beyond_if;
+		}
+
+	        down_write(&current->mm->mmap_sem);
+		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+			PROT_READ | PROT_EXEC,
+			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+			fd_offset);
+	        up_write(&current->mm->mmap_sem);
+
+		if (error != N_TXTADDR(ex)) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+
+	        down_write(&current->mm->mmap_sem);
+ 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+				PROT_READ | PROT_WRITE | PROT_EXEC,
+				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+				fd_offset + ex.a_text);
+	        up_write(&current->mm->mmap_sem);
+		if (error != N_DATADDR(ex)) {
+			send_sig(SIGKILL, current, 0);
+			return error;
+		}
+	}
+beyond_if:
+	set_binfmt(&aout32_format);
+
+	set_brk(current->mm->start_brk, current->mm->brk);
+
+	/* Make sure STACK_TOP returns the right thing.  */
+	orig_thr_flags = current_thread_info()->flags;
+	current_thread_info()->flags |= _TIF_32BIT;
+
+	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+	if (retval < 0) { 
+		current_thread_info()->flags = orig_thr_flags;
+
+		/* Someone check-me: is this error path enough? */ 
+		send_sig(SIGKILL, current, 0); 
+		return retval;
+	}
+
+	current->mm->start_stack =
+		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
+	/* NOTE(review): when the task was previously 64-bit, the pgd
+	 * cache is reloaded into the TSB register -- presumably needed
+	 * after flipping _TIF_32BIT changes the address-space layout;
+	 * confirm against the sparc64 MMU code. */
+	if (!(orig_thr_flags & _TIF_32BIT)) {
+		unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
+
+		__asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "r" (pgd_cache),
+				       "r" (TSB_REG), "i" (ASI_DMMU));
+	}
+	start_thread32(regs, ex.a_entry, current->mm->start_stack);
+	if (current->ptrace & PT_PTRACED)
+		send_sig(SIGTRAP, current, 0);
+	return 0;
+}
+
+/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
+/* Map a ZMAGIC/QMAGIC a.out shared library into the current address
+ * space with do_mmap(), extending via do_brk() when the bss reaches
+ * past the file-backed mapping.  Returns 0 on success, -ENOEXEC for a
+ * bad or unsupported header, or the failing do_mmap()/do_brk() result.
+ */
+static int load_aout32_library(struct file *file)
+{
+	struct inode * inode;
+	unsigned long bss, start_addr, len;
+	unsigned long error;
+	int retval;
+	struct exec ex;
+
+	inode = file->f_dentry->d_inode;
+
+	retval = -ENOEXEC;
+	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
+	if (error != sizeof(ex))
+		goto out;
+
+	/* We come in here for the regular a.out style of shared libraries */
+	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
+	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
+	    inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+		goto out;
+	}
+
+	if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
+	    (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
+		printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
+		goto out;
+	}
+
+	if (N_FLAGS(ex))
+		goto out;
+
+	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
+	   this off to get the starting address for the page */
+
+	start_addr =  ex.a_entry & 0xfffff000;
+
+	/* Now use mmap to map the library into memory. */
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+			PROT_READ | PROT_WRITE | PROT_EXEC,
+			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+			N_TXTOFF(ex));
+	up_write(&current->mm->mmap_sem);
+	retval = error;
+	if (error != start_addr)
+		goto out;
+
+	len = PAGE_ALIGN(ex.a_text + ex.a_data);
+	bss = ex.a_text + ex.a_data + ex.a_bss;
+	if (bss > len) {
+		down_write(&current->mm->mmap_sem);
+		error = do_brk(start_addr + len, bss - len);
+		up_write(&current->mm->mmap_sem);
+		retval = error;
+		if (error != start_addr + len)
+			goto out;
+	}
+	retval = 0;
+out:
+	return retval;
+}
+
+/* Register the 32-bit SunOS a.out loader at boot / module load time. */
+static int __init init_aout32_binfmt(void)
+{
+	return register_binfmt(&aout32_format);
+}
+
+/* Unregister the loader on module unload. */
+static void __exit exit_aout32_binfmt(void)
+{
+	unregister_binfmt(&aout32_format);
+}
+
+module_init(init_aout32_binfmt);
+module_exit(exit_aout32_binfmt);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
new file mode 100644
index 0000000..a1a12d2
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -0,0 +1,159 @@
+/*
+ * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller	(davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek	(jj@ultra.linux.cz)
+ */
+
+#define ELF_ARCH		EM_SPARC
+#define ELF_CLASS		ELFCLASS32
+/* NOTE(review): the trailing semicolon below is expanded into every
+ * use of ELF_DATA and is only harmless in statement position --
+ * confirm no expression use in binfmt_elf.c and consider dropping it. */
+#define ELF_DATA		ELFDATA2MSB;
+
+/* For the most part we present code dumps in the format
+ * Solaris does.
+ */
+typedef unsigned int elf_greg_t;
+#define ELF_NGREG 38
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* Format is:
+ * 	G0 --> G7
+ *	O0 --> O7
+ *	L0 --> L7
+ *	I0 --> I7
+ *	PSR, PC, nPC, Y, WIM, TBR
+ */
+#include <asm/psrcompat.h>
+#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs)	\
+do {	unsigned int *dest = &(__elf_regs[0]);		\
+	struct pt_regs *src = (__pt_regs);		\
+	unsigned int __user *sp;			\
+	int i;						\
+	for(i = 0; i < 16; i++)				\
+		dest[i] = (unsigned int) src->u_regs[i];\
+	/* Don't try this at home kids... */		\
+	sp = (unsigned int __user *) (src->u_regs[14] &	\
+		0x00000000fffffffc);			\
+	for(i = 0; i < 16; i++)				\
+		__get_user(dest[i+16], &sp[i]);		\
+	dest[32] = tstate_to_psr(src->tstate);		\
+	dest[33] = (unsigned int) src->tpc;		\
+	dest[34] = (unsigned int) src->tnpc;		\
+	dest[35] = src->y;				\
+	dest[36] = dest[37] = 0; /* XXX */		\
+} while(0);
+
+typedef struct {
+	union {
+		unsigned int	pr_regs[32];
+		unsigned long	pr_dregs[16];
+	} pr_fr;
+	unsigned int __unused;
+	unsigned int	pr_fsr;
+	unsigned char	pr_qcnt;
+	unsigned char	pr_q_entrysize;
+	unsigned char	pr_en;
+	unsigned int	pr_q[64];
+} elf_fpregset_t;
+
+/* UltraSparc extensions.  Still unused, but will be eventually.  */
+typedef struct {
+	unsigned int pr_type;
+	unsigned int pr_align;
+	union {
+		struct {
+			union {
+				unsigned int	pr_regs[32];
+				unsigned long	pr_dregs[16];
+				long double	pr_qregs[8];
+			} pr_xfr;
+		} pr_v8p;
+		unsigned int	pr_xfsr;
+		unsigned int	pr_fprs;
+		unsigned int	pr_xg[8];
+		unsigned int	pr_xo[8];
+		unsigned long	pr_tstate;
+		unsigned int	pr_filler[8];
+	} pr_un;
+} elf_xregset_t;
+
+#define elf_check_arch(x)	(((x)->e_machine == EM_SPARC) || ((x)->e_machine == EM_SPARC32PLUS))
+
+#define ELF_ET_DYN_BASE         0x70000000
+
+
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	unsigned int pr_sigpend;	/* Set of pending signals */
+	unsigned int pr_sighold;	/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct compat_timeval pr_utime;	/* User time */
+	struct compat_timeval pr_stime;	/* System time */
+	struct compat_timeval pr_cutime;	/* Cumulative user time */
+	struct compat_timeval pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;	/* GP registers */
+	int pr_fpvalid;		/* True if math co-processor being used.  */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	unsigned int pr_flag;	/* flags */
+	u16	pr_uid;
+	u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+#include <linux/highuid.h>
+
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
+
+#include <linux/time.h>
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+/* Convert a cputime_t into a 32-bit compat timeval for the compat core
+ * dump records above.
+ * NOTE(review): the local variable 'jiffies' shadows the kernel-global
+ * jiffies counter -- correct as written, but confusing; consider
+ * renaming it. */
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	unsigned long jiffies = cputime_to_jiffies(cputime);
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
+#define elf_addr_t	u32
+#undef start_thread
+#define start_thread start_thread32
+#define init_elf_binfmt init_elf32_binfmt
+
+MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
+MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
+
+#undef MODULE_DESCRIPTION
+#undef MODULE_AUTHOR
+
+#undef TASK_SIZE
+#define TASK_SIZE 0xf0000000
+
+#include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
new file mode 100644
index 0000000..3d184a7
--- /dev/null
+++ b/arch/sparc64/kernel/central.c
@@ -0,0 +1,457 @@
+/* $Id: central.c,v 1.15 2001/12/19 00:29:51 davem Exp $
+ * central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
+ *
+ * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/page.h>
+#include <asm/fhc.h>
+#include <asm/starfire.h>
+
+/* The single central bus instance; NULL on non-Sunfire machines. */
+struct linux_central *central_bus = NULL;
+/* Singly linked list of every probed FireHose controller. */
+struct linux_fhc *fhc_list = NULL;
+
+/* The central board's own FHC is recorded as central_bus->child. */
+#define IS_CENTRAL_FHC(__fhc)	((__fhc) == central_bus->child)
+
+/* Fatal probe error: report the failing central.c line number on the
+ * PROM console and halt the machine.  Does not return.
+ */
+static void central_probe_failure(int line)
+{
+	prom_printf("CENTRAL: Critical device probe failure at central.c:%d\n",
+		    line);
+	prom_halt();
+}
+
+/* Read the central node's "ranges" property into CENTRAL; leaves the
+ * range count at zero when the property is absent.
+ * NOTE(review): the cnode argument is unused — the node number comes
+ * from central->prom_node instead.
+ */
+static void central_ranges_init(int cnode, struct linux_central *central)
+{
+	int success;
+	
+	central->num_central_ranges = 0;
+	success = prom_getproperty(central->prom_node, "ranges",
+				   (char *) central->central_ranges,
+				   sizeof (central->central_ranges));
+	if (success != -1)
+		central->num_central_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Read the FHC node's "ranges" property into FHC; leaves the range
+ * count at zero when the property is absent.
+ * NOTE(review): the fnode argument is unused — the node number comes
+ * from fhc->prom_node instead.
+ */
+static void fhc_ranges_init(int fnode, struct linux_fhc *fhc)
+{
+	int success;
+	
+	fhc->num_fhc_ranges = 0;
+	success = prom_getproperty(fhc->prom_node, "ranges",
+				   (char *) fhc->fhc_ranges,
+				   sizeof (fhc->fhc_ranges));
+	if (success != -1)
+		fhc->num_fhc_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Range application routines are exported to various drivers,
+ * so do not __init this.
+ */
+/* Rewrite each entry of REGP in place through the parent range whose
+ * child space matches it: swap in the parent space selector and
+ * rebase the physical address.  A register covered by no range is a
+ * fatal probe error.
+ */
+static void adjust_regs(struct linux_prom_registers *regp, int nregs,
+			struct linux_prom_ranges *rangep, int nranges)
+{
+	int regc, rngc;
+
+	for (regc = 0; regc < nregs; regc++) {
+		for (rngc = 0; rngc < nranges; rngc++)
+			if (regp[regc].which_io == rangep[rngc].ot_child_space)
+				break; /* Found it */
+		if (rngc == nranges) /* oops */
+			central_probe_failure(__LINE__);
+		regp[regc].which_io = rangep[rngc].ot_parent_space;
+		regp[regc].phys_addr -= rangep[rngc].ot_child_base;
+		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
+	}
+}
+
+/* Translate REGS (NREGS entries) through FHC's probed "ranges"
+ * property; a no-op when the FHC carries no ranges.
+ */
+void apply_fhc_ranges(struct linux_fhc *fhc,
+		      struct linux_prom_registers *regs,
+		      int nregs)
+{
+	int nranges = fhc->num_fhc_ranges;
+
+	if (!nranges)
+		return;
+
+	adjust_regs(regs, nregs, fhc->fhc_ranges, nranges);
+}
+
+/* Translate REGS (NREGS entries) through CENTRAL's probed "ranges"
+ * property; a no-op when no ranges were found.
+ */
+void apply_central_ranges(struct linux_central *central,
+			  struct linux_prom_registers *regs, int nregs)
+{
+	int nranges = central->num_central_ranges;
+
+	if (!nranges)
+		return;
+
+	adjust_regs(regs, nregs, central->central_ranges, nranges);
+}
+
+/* Boot-time allocator for central/FHC software state: cache-line
+ * aligned, zeroed on success.  Returns NULL on failure.
+ */
+void * __init central_alloc_bootmem(unsigned long size)
+{
+	void *mem = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+
+	if (mem)
+		memset(mem, 0, size);
+
+	return mem;
+}
+
+/* Combine an OBP reg entry's 32-bit space selector (upper half) and
+ * 32-bit offset (lower half) into one 64-bit physical address.
+ */
+static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
+{
+	return (((unsigned long) r->which_io) << 32) |
+	       ((unsigned long) r->phys_addr);
+}
+
+/* Walk the PROM root's "fhc" sibling nodes and build software state
+ * for every board-level (non-central) FireHose controller.  Finding
+ * no FHC at all is fatal.
+ */
+static void probe_other_fhcs(void)
+{
+	struct linux_prom64_registers fpregs[6];
+	char namebuf[128];
+	int node;
+
+	node = prom_getchild(prom_root_node);
+	node = prom_searchsiblings(node, "fhc");
+	if (node == 0)
+		central_probe_failure(__LINE__);
+	while (node) {
+		struct linux_fhc *fhc;
+		int board;
+		u32 tmp;
+
+		fhc = (struct linux_fhc *)
+			central_alloc_bootmem(sizeof(struct linux_fhc));
+		if (fhc == NULL)
+			central_probe_failure(__LINE__);
+
+		/* Link it into the FHC chain. */
+		fhc->next = fhc_list;
+		fhc_list = fhc;
+
+		/* Toplevel FHCs have no parent. */
+		fhc->parent = NULL;
+		
+		fhc->prom_node = node;
+		prom_getstring(node, "name", namebuf, sizeof(namebuf));
+		strcpy(fhc->prom_name, namebuf);
+		fhc_ranges_init(node, fhc);
+
+		/* Non-central FHC's have 64-bit OBP format registers. */
+		if (prom_getproperty(node, "reg",
+				    (char *)&fpregs[0], sizeof(fpregs)) == -1)
+			central_probe_failure(__LINE__);
+
+		/* Only central FHC needs special ranges applied. */
+		fhc->fhc_regs.pregs = fpregs[0].phys_addr;
+		fhc->fhc_regs.ireg = fpregs[1].phys_addr;
+		fhc->fhc_regs.ffregs = fpregs[2].phys_addr;
+		fhc->fhc_regs.sregs = fpregs[3].phys_addr;
+		fhc->fhc_regs.uregs = fpregs[4].phys_addr;
+		fhc->fhc_regs.tregs = fpregs[5].phys_addr;
+
+		/* -1 when the node carries no "board#" property. */
+		board = prom_getintdefault(node, "board#", -1);
+		fhc->board = board;
+
+		/* Record whether this FHC is the JTAG bus master. */
+		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_JCTRL);
+		if ((tmp & FHC_JTAG_CTRL_MENAB) != 0)
+			fhc->jtag_master = 1;
+		else
+			fhc->jtag_master = 0;
+
+		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
+		printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] %s\n",
+		       board,
+		       (tmp & FHC_ID_VERS) >> 28,
+		       (tmp & FHC_ID_PARTID) >> 12,
+		       (tmp & FHC_ID_MANUF) >> 1,
+		       (fhc->jtag_master ? "(JTAG Master)" : ""));
+		
+		/* This bit must be set in all non-central FHC's in
+		 * the system.  When it is clear, this identifies
+		 * the central board.
+		 */
+		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+		tmp |= FHC_CONTROL_IXIST;
+		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+		/* Look for the next FHC. */
+		node = prom_getsibling(node);
+		if (node == 0)
+			break;
+		node = prom_searchsiblings(node, "fhc");
+		if (node == 0)
+			break;
+	}
+}
+
+/* Locate the "clock-board" node under the central FHC, map its
+ * register sets through the FHC and central ranges, and derive the
+ * system slot count from the clock status register.
+ */
+static void probe_clock_board(struct linux_central *central,
+			      struct linux_fhc *fhc,
+			      int cnode, int fnode)
+{
+	struct linux_prom_registers cregs[3];
+	int clknode, nslots, tmp, nregs;
+
+	clknode = prom_searchsiblings(prom_getchild(fnode), "clock-board");
+	if (clknode == 0 || clknode == -1)
+		central_probe_failure(__LINE__);
+
+	nregs = prom_getproperty(clknode, "reg", (char *)&cregs[0], sizeof(cregs));
+	if (nregs == -1)
+		central_probe_failure(__LINE__);
+
+	nregs /= sizeof(struct linux_prom_registers);
+	apply_fhc_ranges(fhc, &cregs[0], nregs);
+	apply_central_ranges(central, &cregs[0], nregs);
+	central->cfreg = prom_reg_to_paddr(&cregs[0]);
+	central->clkregs = prom_reg_to_paddr(&cregs[1]);
+
+	/* A third register set (the clock version register) is optional. */
+	if (nregs == 2)
+		central->clkver = 0UL;
+	else
+		central->clkver = prom_reg_to_paddr(&cregs[2]);
+
+	/* Top two bits of STAT1 encode the chassis slot count. */
+	tmp = upa_readb(central->clkregs + CLOCK_STAT1);
+	tmp &= 0xc0;
+	switch(tmp) {
+	case 0x40:
+		nslots = 16;
+		break;
+	case 0xc0:
+		nslots = 8;
+		break;
+	case 0x80:
+		if (central->clkver != 0UL &&
+		   upa_readb(central->clkver) != 0) {
+			if ((upa_readb(central->clkver) & 0x80) != 0)
+				nslots = 4;
+			else
+				nslots = 5;
+			break;
+		}
+		/* fall through: no usable version register */
+	default:
+		nslots = 4;
+		break;
+	};
+	central->slots = nslots;
+	printk("CENTRAL: Detected %d slot Enterprise system. cfreg[%02x] cver[%02x]\n",
+	       central->slots, upa_readb(central->cfreg),
+	       (central->clkver ? upa_readb(central->clkver) : 0x00));
+}
+
+/* Quiesce one FHC interrupt source: clear via ICLR, then clear bit 31
+ * (presumably the IMAP valid/enable bit — confirm against FHC docs)
+ * in the mapping register.  Each write is read back to flush it.
+ */
+static void ZAP(unsigned long iclr, unsigned long imap)
+{
+	u32 imap_tmp;
+
+	upa_writel(0, iclr);
+	upa_readl(iclr);
+	imap_tmp = upa_readl(imap);
+	imap_tmp &= ~(0x80000000);
+	upa_writel(imap_tmp, imap);
+	upa_readl(imap);
+}
+
+/* Final hardware init pass over every probed FHC: quiesce all four
+ * interrupt sources and normalize the control register.
+ */
+static void init_all_fhc_hw(void)
+{
+	struct linux_fhc *fhc;
+
+	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
+		u32 tmp;
+
+		/* Clear all of the interrupt mapping registers
+		 * just in case OBP left them in a foul state.
+		 */
+		ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
+		    fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
+		ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
+		    fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
+		ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
+		    fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
+		ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
+		    fhc->fhc_regs.tregs + FHC_TREGS_IMAP);
+
+		/* Setup FHC control register. */
+		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+		/* All non-central boards have this bit set. */
+		if (! IS_CENTRAL_FHC(fhc))
+			tmp |= FHC_CONTROL_IXIST;
+
+		/* For all FHCs, clear the firmware synchronization
+		 * line and both low power mode enables.
+		 */
+		tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF |
+			 FHC_CONTROL_SLINE);
+
+		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+		upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+	}
+
+}
+
+/* Probe the "/central" PROM node.  If absent this is not a Sunfire
+ * class machine: do Starfire cpu setup when applicable and return.
+ * Otherwise allocate central/FHC software state, locate the central
+ * FHC and clock board, then probe and initialize all remaining FHCs.
+ */
+void central_probe(void)
+{
+	struct linux_prom_registers fpregs[6];
+	struct linux_fhc *fhc;
+	char namebuf[128];
+	int cnode, fnode, err;
+
+	cnode = prom_finddevice("/central");
+	if (cnode == 0 || cnode == -1) {
+		if (this_is_starfire)
+			starfire_cpu_setup();
+		return;
+	}
+
+	/* Ok we got one, grab some memory for software state. */
+	central_bus = (struct linux_central *)
+		central_alloc_bootmem(sizeof(struct linux_central));
+	if (central_bus == NULL)
+		central_probe_failure(__LINE__);
+
+	fhc = (struct linux_fhc *)
+		central_alloc_bootmem(sizeof(struct linux_fhc));
+	if (fhc == NULL)
+		central_probe_failure(__LINE__);
+
+	/* First init central. */
+	central_bus->child = fhc;
+	central_bus->prom_node = cnode;
+
+	prom_getstring(cnode, "name", namebuf, sizeof(namebuf));
+	strcpy(central_bus->prom_name, namebuf);
+
+	central_ranges_init(cnode, central_bus);
+
+	/* And then central's FHC. */
+	fhc->next = fhc_list;
+	fhc_list = fhc;
+
+	fhc->parent = central_bus;
+	fnode = prom_searchsiblings(prom_getchild(cnode), "fhc");
+	if (fnode == 0 || fnode == -1)
+		central_probe_failure(__LINE__);
+
+	fhc->prom_node = fnode;
+	prom_getstring(fnode, "name", namebuf, sizeof(namebuf));
+	strcpy(fhc->prom_name, namebuf);
+
+	fhc_ranges_init(fnode, fhc);
+
+	/* Now, map in FHC register set. */
+	if (prom_getproperty(fnode, "reg", (char *)&fpregs[0], sizeof(fpregs)) == -1)
+		central_probe_failure(__LINE__);
+
+	/* Unlike board FHCs, central's registers need range translation. */
+	apply_central_ranges(central_bus, &fpregs[0], 6);
+	
+	fhc->fhc_regs.pregs = prom_reg_to_paddr(&fpregs[0]);
+	fhc->fhc_regs.ireg = prom_reg_to_paddr(&fpregs[1]);
+	fhc->fhc_regs.ffregs = prom_reg_to_paddr(&fpregs[2]);
+	fhc->fhc_regs.sregs = prom_reg_to_paddr(&fpregs[3]);
+	fhc->fhc_regs.uregs = prom_reg_to_paddr(&fpregs[4]);
+	fhc->fhc_regs.tregs = prom_reg_to_paddr(&fpregs[5]);
+
+	/* Obtain board number from board status register, Central's
+	 * FHC lacks "board#" property.
+	 */
+	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);
+	fhc->board = (((err >> 16) & 0x01) |
+		      ((err >> 12) & 0x0e));
+
+	fhc->jtag_master = 0;
+
+	/* Attach the clock board registers for CENTRAL. */
+	probe_clock_board(central_bus, fhc, cnode, fnode);
+
+	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
+	printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] (CENTRAL)\n",
+	       fhc->board,
+	       ((err & FHC_ID_VERS) >> 28),
+	       ((err & FHC_ID_PARTID) >> 12),
+	       ((err & FHC_ID_MANUF) >> 1));
+
+	probe_other_fhcs();
+
+	init_all_fhc_hw();
+}
+
+/* Drive one FHC's right LED (ON nonzero lights it; the hardware bit
+ * is active-low), preserving the power/sync clears done at init time.
+ * The final read flushes the write.
+ */
+static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
+{
+	u32 tmp;
+
+	tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+	/* NOTE: reverse logic on this bit */
+	if (on)
+		tmp &= ~(FHC_CONTROL_RLED);
+	else
+		tmp |= FHC_CONTROL_RLED;
+	tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
+
+	upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+	upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+}
+
+/* Drive the clock board's right LED (ON nonzero lights it; the
+ * hardware bit is active-low).  The final read flushes the write.
+ */
+static __inline__ void central_ledblink(struct linux_central *central, int on)
+{
+	u8 tmp;
+
+	tmp = upa_readb(central->clkregs + CLOCK_CTRL);
+
+	/* NOTE: reverse logic on this bit */
+	if (on)
+		tmp &= ~(CLOCK_CTRL_RLED);
+	else
+		tmp |= CLOCK_CTRL_RLED;
+
+	upa_writeb(tmp, central->clkregs + CLOCK_CTRL);
+	upa_readb(central->clkregs + CLOCK_CTRL);
+}
+
+/* Heartbeat timer state for the blinking LEDs below. */
+static struct timer_list sftimer;
+static int led_state;
+
+/* Timer callback: toggle the clock board LED and every non-central
+ * FHC LED in lockstep, then re-arm for half a second later.
+ */
+static void sunfire_timer(unsigned long __ignored)
+{
+	struct linux_fhc *fhc;
+
+	central_ledblink(central_bus, led_state);
+	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next)
+		if (! IS_CENTRAL_FHC(fhc))
+			fhc_ledblink(fhc, led_state);
+	led_state = ! led_state;
+	sftimer.expires = jiffies + (HZ >> 1);
+	add_timer(&sftimer);
+}
+
+/* After PCI/SBUS busses have been probed, this is called to perform
+ * final initialization of all FireHose Controllers in the system.
+ * Starts the half-second LED heartbeat; a no-op on machines without
+ * a central bus.
+ */
+void firetruck_init(void)
+{
+	struct linux_central *central = central_bus;
+	u8 ctrl;
+
+	/* No central bus, nothing to do. */
+	if (central == NULL)
+		return;
+
+	/* OBP leaves it on, turn it off so clock board timer LED
+	 * is in sync with FHC ones.
+	 */
+	ctrl = upa_readb(central->clkregs + CLOCK_CTRL);
+	ctrl &= ~(CLOCK_CTRL_RLED);
+	upa_writeb(ctrl, central->clkregs + CLOCK_CTRL);
+
+	led_state = 0;
+	init_timer(&sftimer);
+	sftimer.data = 0;
+	sftimer.function = &sunfire_timer;
+	sftimer.expires = jiffies + (HZ >> 1);
+	add_timer(&sftimer);
+}
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
new file mode 100644
index 0000000..97cf912
--- /dev/null
+++ b/arch/sparc64/kernel/chmc.c
@@ -0,0 +1,458 @@
+/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
+ * memctrlr.c: Driver for UltraSPARC-III memory controller.
+ *
+ * Copyright (C) 2001 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/spitfire.h>
+#include <asm/chmctrl.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+
+/* Two DIMM groups of four DIMMs per memory controller. */
+#define CHMCTRL_NDGRPS	2
+#define CHMCTRL_NDIMMS	4
+
+#define DIMMS_PER_MC	(CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
+
+/* OBP memory-layout property format. */
+struct obp_map {
+	unsigned char	dimm_map[144];	/* 2-bit DIMM index per data bit */
+	unsigned char	pin_map[576];	/* DIMM pin per bus bit position */
+};
+
+#define DIMM_LABEL_SZ	8
+
+struct obp_mem_layout {
+	/* One max 8-byte string label per DIMM.  Usually
+	 * this matches the label on the motherboard where
+	 * that DIMM resides.
+	 */
+	char		dimm_labels[DIMMS_PER_MC][DIMM_LABEL_SZ];
+
+	/* If symmetric use map[0], else it is
+	 * asymmetric and map[1] should be used.
+	 */
+	char		symmetric;
+
+	struct obp_map	map[2];
+};
+
+#define CHMCTRL_NBANKS	4
+
+/* Software view of one logical SDRAM bank decoded from a
+ * memory-address-decode register (see interpret_one_decode_reg).
+ */
+struct bank_info {
+	struct mctrl_info	*mp;	/* owning controller */
+	int			bank_id;
+
+	u64			raw_reg;	/* raw decode register value */
+	int			valid;
+	int			uk;	/* upper "don't care" mask */
+	int			um;	/* upper match value */
+	int			lk;	/* lower "don't care" mask */
+	int			lm;	/* lower match value */
+	int			interleave;
+	unsigned long		base;
+	unsigned long		size;
+};
+
+/* Per-controller state, linked on mctrl_list. */
+struct mctrl_info {
+	struct list_head	list;
+	int			portid;	/* safari port id of owning cpu */
+	int			index;	/* chmcN enumeration index */
+
+	struct obp_mem_layout	layout_prop;
+	int			layout_size;	/* 0 => inactive controller */
+
+	void __iomem		*regs;
+
+	/* Snapshots of the controller's timing/address registers. */
+	u64			timing_control1;
+	u64			timing_control2;
+	u64			timing_control3;
+	u64			timing_control4;
+	u64			memaddr_control;
+
+	struct bank_info	logical_banks[CHMCTRL_NBANKS];
+};
+
+static LIST_HEAD(mctrl_list);
+
+/* Does BANK decode PHYS_ADDR?  The upper and lower address fields
+ * each match when every bit agrees with the match value, ignoring
+ * bits flagged "don't care" in the corresponding mask.
+ */
+static int bank_match(struct bank_info *bp, unsigned long phys_addr)
+{
+	unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
+	unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
+
+	/* Bank must be enabled to match. */
+	if (bp->valid == 0)
+		return 0;
+
+	/* Would BANK match upper bits? */
+	upper_bits ^= bp->um;		/* What bits are different? */
+	upper_bits  = ~upper_bits;	/* Invert. */
+	upper_bits |= bp->uk;		/* What bits don't matter for matching? */
+	upper_bits  = ~upper_bits;	/* Invert. */
+
+	if (upper_bits)
+		return 0;
+
+	/* Would BANK match lower bits? */
+	lower_bits ^= bp->lm;		/* What bits are different? */
+	lower_bits  = ~lower_bits;	/* Invert. */
+	lower_bits |= bp->lk;		/* What bits don't matter for matching? */
+	lower_bits  = ~lower_bits;	/* Invert. */
+
+	if (lower_bits)
+		return 0;
+
+	/* I always knew you'd be the one. */
+	return 1;
+}
+
+/* Given PHYS_ADDR, walk every registered memory controller and return
+ * the first logical bank that decodes it, or NULL if none does.
+ */
+static struct bank_info *find_bank(unsigned long phys_addr)
+{
+	struct list_head *head = &mctrl_list;
+	struct list_head *pos;
+
+	for (pos = head->next; pos != head; pos = pos->next) {
+		struct mctrl_info *mp =
+			list_entry(pos, struct mctrl_info, list);
+		int bank_no;
+
+		for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
+			struct bank_info *bp = &mp->logical_banks[bank_no];
+
+			if (bank_match(bp, phys_addr))
+				return bp;
+		}
+	}
+
+	return NULL;
+}
+
+/* This is the main purpose of this driver. */
+#define SYNDROME_MIN	-1
+#define SYNDROME_MAX	144
+/* Translate an ECC syndrome code plus faulting physical address into
+ * a human-readable DIMM label in BUF.  Single-bit errors (syndrome
+ * above SYNDROME_MIN) are traced to an exact DIMM and pin via the OBP
+ * memory-layout map; multi-bit errors dump every label of the bank.
+ * Writes "???" when no bank decodes the address or the syndrome is
+ * out of range.  Always returns 0.
+ * NOTE(review): buflen is never consulted — assumes the caller's
+ * buffer is large enough for the longest label string.
+ */
+int chmc_getunumber(int syndrome_code,
+		    unsigned long phys_addr,
+		    char *buf, int buflen)
+{
+	struct bank_info *bp;
+	struct obp_mem_layout *prop;
+	int bank_in_controller, first_dimm;
+
+	bp = find_bank(phys_addr);
+	if (bp == NULL ||
+	    syndrome_code < SYNDROME_MIN ||
+	    syndrome_code > SYNDROME_MAX) {
+		buf[0] = '?';
+		buf[1] = '?';
+		buf[2] = '?';
+		buf[3] = '\0';
+		return 0;
+	}
+
+	/* Which DIMM group within this controller holds the bank? */
+	prop = &bp->mp->layout_prop;
+	bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
+	first_dimm  = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
+	first_dimm *= CHMCTRL_NDIMMS;
+
+	if (syndrome_code != SYNDROME_MIN) {
+		struct obp_map *map;
+		int qword, where_in_line, where, map_index, map_offset;
+		unsigned int map_val;
+
+		/* Yaay, single bit error so we can figure out
+		 * the exact dimm.
+		 */
+		if (prop->symmetric)
+			map = &prop->map[0];
+		else
+			map = &prop->map[1];
+
+		/* Convert syndrome code into the way the bits are
+		 * positioned on the bus.
+		 */
+		if (syndrome_code < 144 - 16)
+			syndrome_code += 16;
+		else if (syndrome_code < 144)
+			syndrome_code -= (144 - 7);
+		else if (syndrome_code < (144 + 3))
+			syndrome_code -= (144 + 3 - 4);
+		else
+			syndrome_code -= 144 + 3;
+
+		/* All this magic has to do with how a cache line
+		 * comes over the wire on Safari.  A 64-bit line
+		 * comes over in 4 quadword cycles, each of which
+		 * transmit ECC/MTAG info as well as the actual
+		 * data.  144 bits per quadword, 576 total.
+		 */
+#define LINE_SIZE	64
+#define LINE_ADDR_MSK	(LINE_SIZE - 1)
+#define QW_PER_LINE	4
+#define QW_BYTES	(LINE_SIZE / QW_PER_LINE)
+#define QW_BITS		144
+#define LAST_BIT	(576 - 1)
+
+		/* Locate the failing bit within the 576-bit line, then
+		 * look up which DIMM (2-bit index) and pin carry it.
+		 */
+		qword = (phys_addr & LINE_ADDR_MSK) / QW_BYTES;
+		where_in_line = ((3 - qword) * QW_BITS) + syndrome_code;
+		where = (LAST_BIT - where_in_line);
+		map_index = where >> 2;
+		map_offset = where & 0x3;
+		map_val = map->dimm_map[map_index];
+		map_val = ((map_val >> ((3 - map_offset) << 1)) & (2 - 1));
+
+		sprintf(buf, "%s, pin %3d",
+			prop->dimm_labels[first_dimm + map_val],
+			map->pin_map[where_in_line]);
+	} else {
+		int dimm;
+
+		/* Multi-bit error, we just dump out all the
+		 * dimm labels associated with this bank.
+		 */
+		for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
+			sprintf(buf, "%s ",
+				prop->dimm_labels[first_dimm + dimm]);
+			buf += strlen(buf);
+		}
+	}
+	return 0;
+}
+
+/* Accessing the registers is slightly complicated.  If you want
+ * to get at the memory controller which is on the same processor
+ * the code is executing, you must use special ASI load/store else
+ * you go through the global mapping.
+ */
+/* Read 64-bit register OFFSET of controller MP.  Preemption is
+ * disabled (get_cpu/put_cpu) so the "local controller" test stays
+ * valid across the access.
+ */
+static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
+{
+	unsigned long ret;
+	int this_cpu = get_cpu();
+
+	if (mp->portid == this_cpu) {
+		__asm__ __volatile__("ldxa	[%1] %2, %0"
+				     : "=r" (ret)
+				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
+	} else {
+		__asm__ __volatile__("ldxa	[%1] %2, %0"
+				     : "=r" (ret)
+				     : "r" (mp->regs + offset),
+				       "i" (ASI_PHYS_BYPASS_EC_E));
+	}
+	put_cpu();
+
+	return ret;
+}
+
+#if 0 /* currently unused */
+/* Write VAL to 64-bit register OFFSET of controller MP.  As with
+ * read_mcreg(), the local controller is reached through the special
+ * MCU ASI while remote ones go through the global physical mapping.
+ */
+static void write_mcreg(struct mctrl_info *mp, unsigned long offset, u64 val)
+{
+	if (mp->portid == smp_processor_id()) {
+		__asm__ __volatile__("stxa	%0, [%1] %2"
+				     : : "r" (val),
+				         "r" (offset), "i" (ASI_MCU_CTRL_REG));
+	} else {
+		/* Fixed: this branch previously used "ldxa" (a load),
+		 * which would have read the register instead of storing
+		 * VAL; a store must use "stxa".
+		 */
+		__asm__ __volatile__("stxa	%0, [%1] %2"
+				     : : "r" (val),
+				         "r" (mp->regs + offset),
+				         "i" (ASI_PHYS_BYPASS_EC_E));
+	}
+}
+#endif
+
+/* Decode memory-address-decode register VAL into logical bank
+ * WHICH_BANK of MP: extract the match/mask fields, then derive the
+ * bank's base address, interleave factor and size.
+ */
+static void interpret_one_decode_reg(struct mctrl_info *mp, int which_bank, u64 val)
+{
+	struct bank_info *p = &mp->logical_banks[which_bank];
+
+	p->mp = mp;
+	p->bank_id = (CHMCTRL_NBANKS * mp->portid) + which_bank;
+	p->raw_reg = val;
+	p->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
+	p->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
+	p->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
+	p->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
+	p->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
+
+	/* Base address: the upper match bits not masked off by UK. */
+	p->base  =  (p->um);
+	p->base &= ~(p->uk);
+	p->base <<= PA_UPPER_BITS_SHIFT;
+
+	/* The lower mask pattern encodes the interleave factor. */
+	switch(p->lk) {
+	case 0xf:
+	default:
+		p->interleave = 1;
+		break;
+
+	case 0xe:
+		p->interleave = 2;
+		break;
+
+	case 0xc:
+		p->interleave = 4;
+		break;
+
+	case 0x8:
+		p->interleave = 8;
+		break;
+
+	case 0x0:
+		p->interleave = 16;
+		break;
+	};
+
+	/* UK[10] is reserved, and UK[11] is not set for the SDRAM
+	 * bank size definition.
+	 */
+	p->size = (((unsigned long)p->uk &
+		    ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
+	p->size /= p->interleave;
+}
+
+/* Snapshot all four decode registers into MP's logical bank table.
+ * Controllers lacking a "memory-layout" property (layout_size == 0)
+ * are inactive and skipped entirely.
+ */
+static void fetch_decode_regs(struct mctrl_info *mp)
+{
+	if (mp->layout_size == 0)
+		return;
+
+	interpret_one_decode_reg(mp, 0,
+				 read_mcreg(mp, CHMCTRL_DECODE1));
+	interpret_one_decode_reg(mp, 1,
+				 read_mcreg(mp, CHMCTRL_DECODE2));
+	interpret_one_decode_reg(mp, 2,
+				 read_mcreg(mp, CHMCTRL_DECODE3));
+	interpret_one_decode_reg(mp, 3,
+				 read_mcreg(mp, CHMCTRL_DECODE4));
+}
+
+/* Build software state for one memory controller PROM node and
+ * register it on mctrl_list as chmc<INDEX>.  Returns 0 on success,
+ * -1 on any failure (allocation, bad properties, ioremap).
+ */
+static int init_one_mctrl(int node, int index)
+{
+	struct mctrl_info *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+	int portid = prom_getintdefault(node, "portid", -1);
+	struct linux_prom64_registers p_reg_prop;
+	int t;
+
+	if (!mp)
+		return -1;
+	memset(mp, 0, sizeof(*mp));
+	if (portid == -1)
+		goto fail;
+
+	mp->portid = portid;
+	/* "memory-layout" is optional; absence means the controller is
+	 * present but drives no memory.
+	 */
+	mp->layout_size = prom_getproplen(node, "memory-layout");
+	if (mp->layout_size < 0)
+		mp->layout_size = 0;
+	if (mp->layout_size > sizeof(mp->layout_prop))
+		goto fail;
+
+	if (mp->layout_size > 0)
+		prom_getproperty(node, "memory-layout",
+				 (char *) &mp->layout_prop,
+				 mp->layout_size);
+
+	/* Sanity check: the US3 controller register bank is 0x48 bytes. */
+	t = prom_getproperty(node, "reg",
+			     (char *) &p_reg_prop,
+			     sizeof(p_reg_prop));
+	if (t < 0 || p_reg_prop.reg_size != 0x48)
+		goto fail;
+
+	mp->regs = ioremap(p_reg_prop.phys_addr, p_reg_prop.reg_size);
+	if (mp->regs == NULL)
+		goto fail;
+
+	if (mp->layout_size != 0UL) {
+		mp->timing_control1 = read_mcreg(mp, CHMCTRL_TCTRL1);
+		mp->timing_control2 = read_mcreg(mp, CHMCTRL_TCTRL2);
+		mp->timing_control3 = read_mcreg(mp, CHMCTRL_TCTRL3);
+		mp->timing_control4 = read_mcreg(mp, CHMCTRL_TCTRL4);
+		mp->memaddr_control = read_mcreg(mp, CHMCTRL_MACTRL);
+	}
+
+	fetch_decode_regs(mp);
+
+	mp->index = index;
+
+	list_add(&mp->list, &mctrl_list);
+
+	/* Report the device. */
+	printk(KERN_INFO "chmc%d: US3 memory controller at %p [%s]\n",
+	       mp->index,
+	       mp->regs, (mp->layout_size ? "ACTIVE" : "INACTIVE"));
+
+	return 0;
+
+fail:
+	if (mp) {
+		if (mp->regs != NULL)
+			iounmap(mp->regs);
+		kfree(mp);
+	}
+	return -1;
+}
+
+/* Probe every top-level PROM node called NAME, numbering successful
+ * controllers from INDEX upward; returns the next free index so a
+ * second probe pass can continue the numbering.
+ */
+static int __init probe_for_string(char *name, int index)
+{
+	int node = prom_getchild(prom_root_node);
+
+	while ((node = prom_searchsiblings(node, name)) != 0) {
+		int ret = init_one_mctrl(node, index);
+
+		if (!ret)
+			index++;
+
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+
+	return index;
+}
+
+/* Module entry: probe memory controllers under both node names used
+ * by OBP ("memory-controller" and "mc-us3").  Cheetah-only hardware.
+ * NOTE(review): the final index is discarded and 0 is returned even
+ * when nothing was found, so the module loads on controller-less
+ * cheetah systems.
+ */
+static int __init chmc_init(void)
+{
+	int index;
+
+	/* This driver is only for cheetah platforms. */
+	if (tlb_type != cheetah && tlb_type != cheetah_plus)
+		return -ENODEV;
+
+	index = probe_for_string("memory-controller", 0);
+	index = probe_for_string("mc-us3", index);
+
+	return 0;
+}
+
+/* Module exit: unlink, unmap and free every registered controller. */
+static void __exit chmc_cleanup(void)
+{
+	struct mctrl_info *mp, *next;
+
+	list_for_each_entry_safe(mp, next, &mctrl_list, list) {
+		list_del(&mp->list);
+		iounmap(mp->regs);
+		kfree(mp);
+	}
+}
+
+module_init(chmc_init);
+module_exit(chmc_cleanup);
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
new file mode 100644
index 0000000..4875695
--- /dev/null
+++ b/arch/sparc64/kernel/cpu.c
@@ -0,0 +1,124 @@
+/* cpu.c: Dinky routines to look for the kind of Sparc cpu
+ *        we are on.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/asi.h>
+#include <asm/system.h>
+#include <asm/fpumacro.h>
+#include <asm/cpudata.h>
+
+DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
+
+/* %ver manufacturer/implementation -> CPU name. */
+struct cpu_iu_info {
+  short manuf;
+  short impl;
+  char* cpu_name;   /* should be enough I hope... */
+};
+
+/* %ver manufacturer/implementation + %fsr version -> FPU name. */
+struct cpu_fp_info {
+  short manuf;
+  short impl;
+  char fpu_vers;
+  char* fp_name;
+};
+
+struct cpu_fp_info linux_sparc_fpu[] = {
+  { 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
+  { 0x22, 0x10, 0, "UltraSparc I integrated FPU"},
+  { 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
+  { 0x17, 0x12, 0, "UltraSparc IIi integrated FPU"},
+  { 0x17, 0x13, 0, "UltraSparc IIe integrated FPU"},
+  { 0x3e, 0x14, 0, "UltraSparc III integrated FPU"},
+  { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
+  { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
+  { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+};
+
+#define NSPARCFPU  (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+
+struct cpu_iu_info linux_sparc_chips[] = {
+  { 0x17, 0x10, "TI UltraSparc I   (SpitFire)"},
+  { 0x22, 0x10, "TI UltraSparc I   (SpitFire)"},
+  { 0x17, 0x11, "TI UltraSparc II  (BlackBird)"},
+  { 0x17, 0x12, "TI UltraSparc IIi (Sabre)"},
+  { 0x17, 0x13, "TI UltraSparc IIe (Hummingbird)"},
+  { 0x3e, 0x14, "TI UltraSparc III (Cheetah)"},
+  { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
+  { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
+  { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+};
+
+#define NSPARCCHIPS  (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+
+/* Filled in by cpu_probe(); shown in /proc/cpuinfo. */
+char *sparc_cpu_type = "cpu-oops";
+char *sparc_fpu_type = "fpu-oops";
+
+unsigned int fsr_storage;
+
+/* Identify CPU and FPU from the %ver privileged register and the FPU
+ * version field of %fsr, pointing sparc_cpu_type/sparc_fpu_type at
+ * the matching name strings from the tables above.
+ */
+void __init cpu_probe(void)
+{
+	unsigned long ver, fpu_vers, manuf, impl, fprs;
+	int i;
+	
+	/* Enable the FPU long enough to store %fsr, then restore. */
+	fprs = fprs_read();
+	fprs_write(FPRS_FEF);
+	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
+			      : "=&r" (ver)
+			      : "r" (&fpu_vers));
+	fprs_write(fprs);
+	
+	manuf = ((ver >> 48) & 0xffff);
+	impl = ((ver >> 32) & 0xffff);
+
+	/* fsr.vers is a 3-bit field at bit 17. */
+	fpu_vers = ((fpu_vers >> 17) & 0x7);
+
+retry:
+	for (i = 0; i < NSPARCCHIPS; i++) {
+		if (linux_sparc_chips[i].manuf == manuf) {
+			if (linux_sparc_chips[i].impl == impl) {
+				sparc_cpu_type =
+					linux_sparc_chips[i].cpu_name;
+				break;
+			}
+		}
+	}
+
+	if (i == NSPARCCHIPS) {
+ 		/* Maybe it is a cheetah+ derivative, report it as cheetah+
+ 		 * in that case until we learn the real names.
+ 		 */
+ 		if (manuf == 0x3e &&
+ 		    impl > 0x15) {
+ 			impl = 0x15;
+ 			goto retry;
+ 		} else {
+ 			printk("DEBUG: manuf[%lx] impl[%lx]\n",
+ 			       manuf, impl);
+ 		}
+		sparc_cpu_type = "Unknown CPU";
+	}
+
+	for (i = 0; i < NSPARCFPU; i++) {
+		if (linux_sparc_fpu[i].manuf == manuf &&
+		    linux_sparc_fpu[i].impl == impl) {
+			if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
+				sparc_fpu_type =
+					linux_sparc_fpu[i].fp_name;
+				break;
+			}
+		}
+	}
+
+	if (i == NSPARCFPU) {
+ 		printk("DEBUG: manuf[%lx] impl[%lx] fsr.vers[%lx]\n",
+ 		       manuf, impl, fpu_vers);
+		sparc_fpu_type = "Unknown FPU";
+	}
+}
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
new file mode 100644
index 0000000..d710274
--- /dev/null
+++ b/arch/sparc64/kernel/devices.c
@@ -0,0 +1,144 @@
+/* devices.c: Initial scan of the prom device tree for important
+ *            Sparc device nodes which we need to find.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+#include <asm/spitfire.h>
+#include <asm/timer.h>
+#include <asm/cpudata.h>
+
+/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
+ * operations in asm/ns87303.h
+ */
+DEFINE_SPINLOCK(ns87303_lock);
+
+extern void cpu_probe(void);
+extern void central_probe(void);
+
+/* Name of the OBP property carrying a cpu node's module ID:
+ * spitfire-based machines call it "upa-portid", newer ones "portid".
+ */
+static char *cpu_mid_prop(void)
+{
+	return (tlb_type == spitfire) ? "upa-portid" : "portid";
+}
+
+/* If ND is a "cpu" device node, test it with COMPARE.  On a match
+ * return 0 and optionally hand back the node and its module ID; on a
+ * non-matching cpu node bump *CUR_INST so COMPARE can count
+ * instances.  Non-cpu nodes return -ENODEV without counting.
+ */
+static int check_cpu_node(int nd, int *cur_inst,
+			  int (*compare)(int, int, void *), void *compare_arg,
+			  int *prom_node, int *mid)
+{
+	char node_str[128];
+
+	prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+	if (strcmp(node_str, "cpu"))
+		return -ENODEV;
+
+	if (!compare(nd, *cur_inst, compare_arg)) {
+		if (prom_node)
+			*prom_node = nd;
+		if (mid)
+			*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+		return 0;
+	}
+
+	(*cur_inst)++;
+
+	return -ENODEV;
+}
+
+/* Scan the PROM tree for a cpu node accepted by COMPARE: the root
+ * node first, then the root's children.  Returns 0 with
+ * *prom_node/*mid filled in, or -ENODEV when nothing matches.
+ * NOTE(review): the sibling loop advances before checking, so the
+ * root's first child is never examined — presumably never a cpu node
+ * on these machines; confirm if reusing this walk.
+ */
+static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
+			 int *prom_node, int *mid)
+{
+	int nd, cur_inst, err;
+
+	nd = prom_root_node;
+	cur_inst = 0;
+
+	err = check_cpu_node(nd, &cur_inst,
+			     compare, compare_arg,
+			     prom_node, mid);
+	if (err == 0)
+		return 0;
+
+	nd = prom_getchild(nd);
+	while ((nd = prom_getsibling(nd)) != 0) {
+		err = check_cpu_node(nd, &cur_inst,
+				     compare, compare_arg,
+				     prom_node, mid);
+		if (err == 0)
+			return 0;
+	}
+
+	return -ENODEV;
+}
+
+/* Match callback for __cpu_find_by(): succeed when the running
+ * instance counter equals the instance number packed into _ARG.
+ */
+static int cpu_instance_compare(int nd, int instance, void *_arg)
+{
+	return (instance == (int) (long) _arg) ? 0 : -ENODEV;
+}
+
+/* Find the INSTANCE'th cpu node in tree order; 0 on success with
+ * *prom_node and *mid filled in, -ENODEV otherwise.
+ */
+int cpu_find_by_instance(int instance, int *prom_node, int *mid)
+{
+	return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
+			     prom_node, mid);
+}
+
+/* Match callback for __cpu_find_by(): succeed when the node's module
+ * ID property equals the mid packed into _ARG.
+ */
+static int cpu_mid_compare(int nd, int instance, void *_arg)
+{
+	int desired_mid = (int) (long) _arg;
+	int this_mid;
+
+	this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+	if (this_mid == desired_mid)
+		return 0;
+	return -ENODEV;
+}
+
+/* Find the cpu node whose module ID equals MID; 0 on success with
+ * *prom_node filled in, -ENODEV otherwise.
+ */
+int cpu_find_by_mid(int mid, int *prom_node)
+{
+	return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
+			     prom_node, NULL);
+}
+
+/* Early boot device-tree scan: size the ioport resource, record the
+ * boot cpu's clock rate (UP only — SMP reads it per-cpu elsewhere),
+ * then probe the central bus and identify the CPU.
+ */
+void __init device_scan(void)
+{
+	/* FIX ME FAST... -DaveM */
+	ioport_resource.end = 0xffffffffffffffffUL;
+
+	prom_printf("Booting Linux...\n");
+
+#ifndef CONFIG_SMP
+	{
+		int err, cpu_node;
+		err = cpu_find_by_instance(0, &cpu_node, NULL);
+		if (err) {
+			prom_printf("No cpu nodes, cannot continue\n");
+			prom_halt();
+		}
+		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
+							    "clock-frequency",
+							    0);
+	}
+#endif
+
+	central_probe();
+
+	cpu_probe();
+}
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
new file mode 100644
index 0000000..b73a3c8
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -0,0 +1,181 @@
+/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
+ * dtlb_backend.S: Back end to DTLB miss replacement strategy.
+ *                 This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+
+#if PAGE_SHIFT == 13
+#define SZ_BITS		_PAGE_SZ8K
+#elif PAGE_SHIFT == 16
+#define SZ_BITS		_PAGE_SZ64K
+#elif PAGE_SHIFT == 19
+#define SZ_BITS		_PAGE_SZ512K
+#elif PAGE_SHIFT == 22
+#define SZ_BITS		_PAGE_SZ4M
+#endif
+
+#define VALID_SZ_BITS	(_PAGE_VALID | SZ_BITS)
+
+#define VPTE_BITS		(_PAGE_CP | _PAGE_CV | _PAGE_P )
+#define VPTE_SHIFT		(PAGE_SHIFT - 3)
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
+ * 2) Nucleus loads and stores to/from user/kernel window save areas.
+ * 3) VPTE misses from dtlb_base and itlb_base.
+ *
+ * We need to extract out the PMD and PGDIR indexes from the
+ * linear virtual page table access address.  The PTE index
+ * is at the bottom, but we are not concerned with it.  Bits
+ * 0 to 2 are clear since each PTE is 8 bytes in size.  Each
+ * PMD and PGDIR entry are 4 bytes in size.   Thus, this
+ * address looks something like:
+ *
+ * |---------------------------------------------------------------|
+ * |  ...   |    PGDIR index    |    PMD index    | PTE index  |   |
+ * |---------------------------------------------------------------|
+ *   63   F   E               D   C             B   A         3 2 0  <- bit nr
+ *
+ *  The variable bits above are defined as:
+ *  A --> 3 + (PAGE_SHIFT - log2(8))
+ *    --> 3 + (PAGE_SHIFT - 3) - 1
+ *        (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
+ *  B --> A + 1
+ *  C --> B + (PAGE_SHIFT - log2(4))
+ *    -->  B + (PAGE_SHIFT - 2) - 1
+ *        (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
+ *  D --> C + 1
+ *  E --> D + (PAGE_SHIFT - log2(4))
+ *    --> D + (PAGE_SHIFT - 2) - 1
+ *        (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
+ *  F --> E + 1
+ *
+ * (Note how "B" always evaluates to PAGE_SHIFT, all the other constants
+ *  cancel out.)
+ *
+ * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
+ * A --> 12
+ * B --> 13
+ * C --> 23
+ * D --> 24
+ * E --> 34
+ * F --> 35
+ *
+ * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
+ * A --> 15
+ * B --> 16
+ * C --> 29
+ * D --> 30
+ * E --> 43
+ * F --> 44
+ *
+ * Because bits both above and below each PGDIR and PMD index need to
+ * be masked out, and the index can be as long as 14 bits (when using a
+ * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
+ * to extract each index out.
+ *
+ * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
+ * we try to avoid using them for the entire operation.  We could setup
+ * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
+ *
+ * We need a mask covering bits B --> C and one covering D --> E.
+ * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
+ * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
+ * The second in each set cannot be loaded with a single sethi
+ * instruction, because the upper bits are past bit 32.  We would
+ * need to use a sethi + a shift.
+ *
+ * For the time being, we use 2 shifts and a simple "and" mask.
+ * We shift left to clear the bits above the index, we shift down
+ * to clear the bits below the index (sans the log2(4 or 8) bits)
+ * and a mask to clear the log2(4 or 8) bits.  We need therefore
+ * define 4 shift counts, all of which are relative to PAGE_SHIFT.
+ *
+ * Although unsupportable for other reasons, this does mean that
+ * 512K and 4MB page sizes would be generally supported by the
+ * kernel.  (ELF binaries would break with > 64K PAGE_SIZE since
+ * the sections are only aligned that strongly).
+ *
+ * The operations performed for extraction are thus:
+ *
+ *      ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
+ *
+ */
+
+/* Bit positions A-F exactly as derived in the comment above; all are
+ * pure functions of PAGE_SHIFT.
+ */
+#define A (3 + (PAGE_SHIFT - 3) - 1)
+#define B (A + 1)
+#define C (B + (PAGE_SHIFT - 2) - 1)
+#define D (C + 1)
+#define E (D + (PAGE_SHIFT - 2) - 1)
+#define F (E + 1)
+
+/* Shift pairs implementing ((X << LEFT) >> RIGHT) & ~LOW_MASK_BITS to
+ * extract the PMD and PGDIR indexes from the VPTE miss address.
+ */
+#define PMD_SHIFT_LEFT		(64 - D)
+#define PMD_SHIFT_RIGHT		(64 - (D - B) - 2)
+#define PGDIR_SHIFT_LEFT 	(64 - F)
+#define PGDIR_SHIFT_RIGHT	(64 - (F - D) - 2)
+#define LOW_MASK_BITS		0x3
+
+/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss	*/
+	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
+	add		%g3, %g3, %g5			! Compute VPTE base
+	cmp		%g4, %g5			! VPTE miss?
+	bgeu,pt		%xcc, 1f			! Continue here
+	 andcc		%g4, TAG_CONTEXT_BITS, %g5	! tl0 miss Nucleus test
+	ba,a,pt		%xcc, from_tl1_trap		! Fall to tl0 miss
+1:	sllx		%g6, VPTE_SHIFT, %g4		! Position TAG_ACCESS
+	or		%g4, %g5, %g4			! Prepare TAG_ACCESS
+
+/* TLB1 ** ICACHE line 2: Quick VPTE miss	  	*/
+	mov		TSB_REG, %g1			! Grab TSB reg
+	ldxa		[%g1] ASI_DMMU, %g5		! Doing PGD caching?
+	sllx		%g6, PMD_SHIFT_LEFT, %g1	! Position PMD offset
+	be,pn		%xcc, sparc64_vpte_nucleus	! Is it from Nucleus?
+	 srlx		%g1, PMD_SHIFT_RIGHT, %g1	! Mask PMD offset bits
+	brnz,pt		%g5, sparc64_vpte_continue	! Yep, go like smoke
+	 andn		%g1, LOW_MASK_BITS, %g1		! Final PMD mask
+	sllx		%g6, PGDIR_SHIFT_LEFT, %g5	! Position PGD offset
+
+/* TLB1 ** ICACHE line 3: Quick VPTE miss	  	*/
+	srlx		%g5, PGDIR_SHIFT_RIGHT, %g5	! Mask PGD offset bits
+	andn		%g5, LOW_MASK_BITS, %g5		! Final PGD mask
+	lduwa		[%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
+	brz,pn		%g5, vpte_noent			! Valid?
+sparc64_kpte_continue:
+	 sllx		%g5, 11, %g5			! Shift into place
+sparc64_vpte_continue:
+	lduwa		[%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
+	sllx		%g5, 11, %g5			! Shift into place
+	brz,pn		%g5, vpte_noent			! Valid?
+
+/* TLB1 ** ICACHE line 4: Quick VPTE miss	  	*/
+	 mov		(VALID_SZ_BITS >> 61), %g1	! upper vpte into %g1
+	sllx		%g1, 61, %g1			! finish calc
+	or		%g5, VPTE_BITS, %g5		! Prepare VPTE data
+	or		%g5, %g1, %g5			! ...
+	mov		TLB_SFSR, %g1			! Restore %g1 value
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Load VPTE into TLB
+	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
+	retry						! Load PTE once again
+
+/* Undef everything: this file is textually included into the trap
+ * table, so these helper names must not leak into later inclusions.
+ */
+#undef SZ_BITS
+#undef VALID_SZ_BITS
+#undef VPTE_SHIFT
+#undef VPTE_BITS
+#undef A
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef PMD_SHIFT_LEFT
+#undef PMD_SHIFT_RIGHT
+#undef PGDIR_SHIFT_LEFT
+#undef PGDIR_SHIFT_RIGHT
+#undef LOW_MASK_BITS
+
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
new file mode 100644
index 0000000..ded2fed
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -0,0 +1,113 @@
+/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
+ * dtlb_base.S:	Front end to DTLB miss replacement strategy.
+ *              This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+
+/* %g1	TLB_SFSR	(%g1 + %g1 == TLB_TAG_ACCESS)
+ * %g2	(KERN_HIGHBITS | KERN_LOWBITS)
+ * %g3  VPTE base	(0xfffffffe00000000)	Spitfire/Blackbird (44-bit VA space)
+ *			(0xffe0000000000000)	Cheetah		   (64-bit VA space)
+ * %g7	__pa(current->mm->pgd)
+ *
+ * The VPTE base value is completely magic, but note that
+ * few places in the kernel other than these TLB miss
+ * handlers know anything about the VPTE mechanism or
+ * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
+ * Consider the 44-bit VADDR Ultra-I/II case as an example:
+ *
+ * VA[0 :  (1<<43)] produce VPTE index [%g3                        :   0]
+ * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
+ *
+ * For Cheetah's 64-bit VADDR space this is:
+ *
+ * VA[0 :  (1<<63)] produce VPTE index [%g3                        :   0]
+ * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
+ *
+ * If you're paying attention you'll notice that this means half of
+ * the VPTE table is above %g3 and half is below, low VA addresses
+ * map progressively upwards from %g3, and high VA addresses map
+ * progressively upwards towards %g3.  This trick was needed to make
+ * the same 8 instruction handler work both for Spitfire/Blackbird's
+ * peculiar VA space hole configuration and the full 64-bit VA space
+ * one of Cheetah at the same time.
+ */
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
+ * 2) Nucleus loads and stores to/from vmalloc() areas.
+ * 3) User loads and stores.
+ * 4) User space accesses by nucleus at tl0
+ */
+
+#if PAGE_SHIFT == 13
+/*
+ * To compute vpte offset, we need to do ((addr >> 13) << 3),
+ * which can be optimized to (addr >> 10) if bits 10/11/12 can
+ * be guaranteed to be 0 ... mmu_context.h does guarantee this
+ * by only using 10 bits in the hwcontext value.
+ */
+#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+				srax	r1, 10, r2
+/* The nop balances the empty OFFSET1 above, keeping the handler the
+ * same number of instructions for every page size.
+ */
+#define CREATE_VPTE_NOP		nop
+#else
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+				srax	r1, PAGE_SHIFT, r2
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+				sllx	r2, 3, r2
+#define CREATE_VPTE_NOP
+#endif
+
+/* DTLB ** ICACHE line 1: Quick user TLB misses		*/
+	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
+	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
+from_tl1_trap:
+	rdpr		%tl, %g5			! For TL==3 test
+	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
+	be,pn		%xcc, 3f			! Yep, special processing
+	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
+	cmp		%g5, 4				! Last trap level?
+	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
+	 nop						! delay slot
+
+/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
+	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
+1:	brgez,pn	%g5, longpath			! Invalid, branch out
+	 nop						! Delay-slot
+9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry						! Trap return
+3:	brlz,pt		%g4, 9b				! Kernel virtual map?
+	 xor		%g2, %g4, %g5			! Finish bit twiddles
+	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+
+/* DTLB ** ICACHE line 3: winfixups+real_faults		*/
+longpath:
+	rdpr		%pstate, %g5			! Move into alternate globals
+	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
+	rdpr		%tl, %g4			! See where we came from.
+	cmp		%g4, 1				! Is etrap/rtrap window fault?
+	mov		TLB_TAG_ACCESS, %g4		! Prepare for fault processing
+	ldxa		[%g4] ASI_DMMU, %g5		! Load faulting VA page
+	be,pt		%xcc, sparc64_realfault_common	! Jump to normal fault handling
+	 mov		FAULT_CODE_DTLB, %g4		! It was read from DTLB
+
+/* DTLB ** ICACHE line 4: Unused...	*/
+	ba,a,pt		%xcc, winfix_trampoline		! Call window fixup code
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	CREATE_VPTE_NOP
+
+/* Undef: this file is included into the trap table; do not leak names. */
+#undef CREATE_VPTE_OFFSET1
+#undef CREATE_VPTE_OFFSET2
+#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
new file mode 100644
index 0000000..d848bb7
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -0,0 +1,54 @@
+/* $Id: dtlb_prot.S,v 1.22 2001/04/11 23:40:32 davem Exp $
+ * dtlb_prot.S: DTLB protection trap strategy.
+ *              This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+/* Ways we can get here:
+ *
+ * [TL == 0] 1) User stores to readonly pages.
+ * [TL == 0] 2) Nucleus stores to user readonly pages.
+ * [TL >  0] 3) Nucleus stores to user readonly stack frame.
+ */
+
+/* Each group below fills one 8-instruction I-cache line; the trailing
+ * nops are padding to preserve that alignment.
+ */
+/* PROT ** ICACHE line 1: User DTLB protection trap	*/
+	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
+	membar		#Sync				! Synchronize ASI stores
+	rdpr		%pstate, %g5			! Move into alternate globals
+	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
+	rdpr		%tl, %g1			! Need to do a winfixup?
+	cmp		%g1, 1				! Trap level >1?
+	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
+	nop
+
+/* PROT ** ICACHE line 2: More real fault processing */
+	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
+	 ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
+	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+	nop
+	nop
+	nop
+	nop
+
+/* PROT ** ICACHE line 3: Unused...	*/
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+/* PROT ** ICACHE line 4: Unused...	*/
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
new file mode 100644
index 0000000..6ffbeb7
--- /dev/null
+++ b/arch/sparc64/kernel/ebus.c
@@ -0,0 +1,644 @@
+/* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $
+ * ebus.c: PCI to EBus bridge device.
+ *
+ * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 1999  David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pbm.h>
+#include <asm/ebus.h>
+#include <asm/oplib.h>
+#include <asm/bpp.h>
+#include <asm/irq.h>
+
+/* EBUS dma library. */
+
+/* Register offsets within an EBus DMA channel. */
+#define EBDMA_CSR	0x00UL	/* Control/Status */
+#define EBDMA_ADDR	0x04UL	/* DMA Address */
+#define EBDMA_COUNT	0x08UL	/* DMA Count */
+
+/* EBDMA_CSR bit definitions. */
+#define EBDMA_CSR_INT_PEND	0x00000001
+#define EBDMA_CSR_ERR_PEND	0x00000002
+#define EBDMA_CSR_DRAIN		0x00000004
+#define EBDMA_CSR_INT_EN	0x00000010
+#define EBDMA_CSR_RESET		0x00000080
+#define EBDMA_CSR_WRITE		0x00000100
+#define EBDMA_CSR_EN_DMA	0x00000200
+#define EBDMA_CSR_CYC_PEND	0x00000400
+#define EBDMA_CSR_DIAG_RD_DONE	0x00000800
+#define EBDMA_CSR_DIAG_WR_DONE	0x00001000
+#define EBDMA_CSR_EN_CNT	0x00002000
+#define EBDMA_CSR_TC		0x00004000
+#define EBDMA_CSR_DIS_CSR_DRN	0x00010000
+#define EBDMA_CSR_BURST_SZ_MASK	0x000c0000
+#define EBDMA_CSR_BURST_SZ_1	0x00080000
+#define EBDMA_CSR_BURST_SZ_4	0x00000000
+#define EBDMA_CSR_BURST_SZ_8	0x00040000
+#define EBDMA_CSR_BURST_SZ_16	0x000c0000
+#define EBDMA_CSR_DIAG_EN	0x00100000
+#define EBDMA_CSR_DIS_ERR_PEND	0x00400000
+#define EBDMA_CSR_TCI_DIS	0x00800000
+#define EBDMA_CSR_EN_NEXT	0x01000000
+#define EBDMA_CSR_DMA_ON	0x02000000
+#define EBDMA_CSR_A_LOADED	0x04000000
+#define EBDMA_CSR_NA_LOADED	0x08000000
+#define EBDMA_CSR_DEV_ID_MASK	0xf0000000
+
+/* Poll iterations allowed when draining a reset (__ebus_dma_reset). */
+#define EBUS_DMA_RESET_TIMEOUT	10000
+
+/* Reset the DMA engine.  When @no_drain is set, return right after
+ * writing the reset bit; otherwise poll CSR until neither DRAIN nor
+ * CYC_PEND is set, giving up silently after EBUS_DMA_RESET_TIMEOUT
+ * iterations of 10us each.
+ */
+static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
+{
+	int i;
+	u32 val = 0;
+
+	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
+	udelay(1);
+
+	if (no_drain)
+		return;
+
+	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
+		val = readl(p->regs + EBDMA_CSR);
+
+		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
+			break;
+		udelay(10);
+	}
+}
+
+/* Interrupt handler used when EBUS_DMA_FLAG_USE_EBDMA_HANDLER is set.
+ * Reads CSR, writes it back to acknowledge the pending bits, then
+ * dispatches the client callback: ERROR on an error, otherwise DMA
+ * (terminal count reached) or DEVICE.  Returns IRQ_NONE when neither
+ * error nor interrupt was pending (shared-irq friendly).
+ */
+static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct ebus_dma_info *p = dev_id;
+	unsigned long flags;
+	u32 csr = 0;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	writel(csr, p->regs + EBDMA_CSR);	/* ack by write-back */
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (csr & EBDMA_CSR_ERR_PEND) {
+		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
+		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
+		return IRQ_HANDLED;
+	} else if (csr & EBDMA_CSR_INT_PEND) {
+		p->callback(p,
+			    (csr & EBDMA_CSR_TC) ?
+			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
+			    p->client_cookie);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+
+}
+
+/* Validate a caller-filled ebus_dma_info and initialize the channel:
+ * regs must be mapped, only the two known flags may be set, a callback
+ * is mandatory with USE_EBDMA_HANDLER, and name must be non-empty.
+ * Performs a no-drain reset and programs burst size 16 with byte
+ * counting enabled.  Returns 0 or -EINVAL.
+ */
+int ebus_dma_register(struct ebus_dma_info *p)
+{
+	u32 csr;
+
+	if (!p->regs)
+		return -EINVAL;
+	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
+			 EBUS_DMA_FLAG_TCI_DISABLE))
+		return -EINVAL;
+	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
+		return -EINVAL;
+	if (!strlen(p->name))
+		return -EINVAL;
+
+	__ebus_dma_reset(p, 1);
+
+	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
+
+	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
+		csr |= EBDMA_CSR_TCI_DIS;
+
+	writel(csr, p->regs + EBDMA_CSR);
+
+	return 0;
+}
+EXPORT_SYMBOL(ebus_dma_register);
+
+/* Enable (@on != 0) or disable interrupt delivery for the channel,
+ * requesting/freeing the shared irq line when the library-internal
+ * handler is in use.  Returns 0 on success.
+ *
+ * NOTE(review): a request_irq() failure is reported as -EBUSY
+ * regardless of its actual reason -- confirm callers don't care.
+ */
+int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
+{
+	unsigned long flags;
+	u32 csr;
+
+	if (on) {
+		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
+			if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p))
+				return -EBUSY;
+		}
+
+		spin_lock_irqsave(&p->lock, flags);
+		csr = readl(p->regs + EBDMA_CSR);
+		csr |= EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		spin_unlock_irqrestore(&p->lock, flags);
+	} else {
+		spin_lock_irqsave(&p->lock, flags);
+		csr = readl(p->regs + EBDMA_CSR);
+		csr &= ~EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
+			free_irq(p->irq, p);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ebus_dma_irq_enable);
+
+/* Tear down a channel: disable its interrupt bit if currently enabled
+ * and, only in that case, free the irq line (the enable bit doubles as
+ * the record of whether ebus_dma_irq_enable() requested the irq).
+ */
+void ebus_dma_unregister(struct ebus_dma_info *p)
+{
+	unsigned long flags;
+	u32 csr;
+	int irq_on = 0;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	if (csr & EBDMA_CSR_INT_EN) {
+		csr &= ~EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		irq_on = 1;
+	}
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (irq_on)
+		free_irq(p->irq, p);
+}
+EXPORT_SYMBOL(ebus_dma_unregister);
+
+/* Queue one transfer of @len bytes at @bus_addr.  Rejects len >= 2^24
+ * up front (presumably the COUNT register width -- confirm against the
+ * hardware docs).  Fails with -EINVAL if DMA is not enabled and -EBUSY
+ * if a next-address is already loaded; otherwise programs COUNT then
+ * ADDR and returns 0.
+ */
+int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
+{
+	unsigned long flags;
+	u32 csr;
+	int err;
+
+	if (len >= (1 << 24))
+		return -EINVAL;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	err = -EINVAL;
+	if (!(csr & EBDMA_CSR_EN_DMA))
+		goto out;
+	err = -EBUSY;
+	if (csr & EBDMA_CSR_NA_LOADED)
+		goto out;
+
+	writel(len,      p->regs + EBDMA_COUNT);
+	writel(bus_addr, p->regs + EBDMA_ADDR);
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(ebus_dma_request);
+
+/* Reset the channel (with drain) and program it for a new transfer
+ * direction: @write selects device-bound transfers via the WRITE bit.
+ * Interrupts, byte counting, burst 16 and address chaining (EN_NEXT)
+ * are enabled; TCI_DIS is re-applied from the client flags.
+ */
+void ebus_dma_prepare(struct ebus_dma_info *p, int write)
+{
+	unsigned long flags;
+	u32 csr;
+
+	spin_lock_irqsave(&p->lock, flags);
+	__ebus_dma_reset(p, 0);
+
+	csr = (EBDMA_CSR_INT_EN |
+	       EBDMA_CSR_EN_CNT |
+	       EBDMA_CSR_BURST_SZ_16 |
+	       EBDMA_CSR_EN_NEXT);
+
+	if (write)
+		csr |= EBDMA_CSR_WRITE;
+	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
+		csr |= EBDMA_CSR_TCI_DIS;
+
+	writel(csr, p->regs + EBDMA_CSR);
+
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+EXPORT_SYMBOL(ebus_dma_prepare);
+
+/* Return the current value of the COUNT register (bytes remaining). */
+unsigned int ebus_dma_residue(struct ebus_dma_info *p)
+{
+	return readl(p->regs + EBDMA_COUNT);
+}
+EXPORT_SYMBOL(ebus_dma_residue);
+
+/* Return the current value of the ADDR register. */
+unsigned int ebus_dma_addr(struct ebus_dma_info *p)
+{
+	return readl(p->regs + EBDMA_ADDR);
+}
+EXPORT_SYMBOL(ebus_dma_addr);
+
+/* Set or clear the EN_DMA bit, writing CSR back only when the bit
+ * actually changes (avoids a redundant register write).
+ */
+void ebus_dma_enable(struct ebus_dma_info *p, int on)
+{
+	unsigned long flags;
+	u32 orig_csr, csr;
+
+	spin_lock_irqsave(&p->lock, flags);
+	orig_csr = csr = readl(p->regs + EBDMA_CSR);
+	if (on)
+		csr |= EBDMA_CSR_EN_DMA;
+	else
+		csr &= ~EBDMA_CSR_EN_DMA;
+	if ((orig_csr & EBDMA_CSR_EN_DMA) !=
+	    (csr & EBDMA_CSR_EN_DMA))
+		writel(csr, p->regs + EBDMA_CSR);
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+EXPORT_SYMBOL(ebus_dma_enable);
+
+/* Head of the global list of probed EBus bridges; built by ebus_init(). */
+struct linux_ebus *ebus_chain = NULL;
+
+#ifdef CONFIG_SUN_AUXIO
+extern void auxio_probe(void);
+#endif
+
+/* Zeroed GFP_ATOMIC allocation that panics on failure, so callers can
+ * omit NULL checks.
+ */
+static inline void *ebus_alloc(size_t size)
+{
+	void *mem;
+
+	mem = kmalloc(size, GFP_ATOMIC);
+	if (!mem)
+		panic("ebus_alloc: out of memory");
+	memset((char *)mem, 0, size);
+	return mem;
+}
+
+/* Read the bridge's "ranges" PROM property; a missing property leaves
+ * num_ebus_ranges at 0.
+ */
+static void __init ebus_ranges_init(struct linux_ebus *ebus)
+{
+	int success;
+
+	ebus->num_ebus_ranges = 0;
+	success = prom_getproperty(ebus->prom_node, "ranges",
+				   (char *)ebus->ebus_ranges,
+				   sizeof(ebus->ebus_ranges));
+	if (success != -1)
+		ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
+}
+
+/* Read the bridge's "interrupt-map" and "interrupt-map-mask" PROM
+ * properties.  A missing interrupt-map is tolerated (num_ebus_intmap
+ * stays 0); a map without its mask is fatal.
+ */
+static void __init ebus_intmap_init(struct linux_ebus *ebus)
+{
+	int success;
+
+	ebus->num_ebus_intmap = 0;
+	success = prom_getproperty(ebus->prom_node, "interrupt-map",
+				   (char *)ebus->ebus_intmap,
+				   sizeof(ebus->ebus_intmap));
+	if (success == -1)
+		return;
+
+	ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
+
+	success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
+				   (char *)&ebus->ebus_intmask,
+				   sizeof(ebus->ebus_intmask));
+	if (success == -1) {
+		prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
+		prom_halt();
+	}
+}
+
+/* Translate *interrupt through the bridge's interrupt-map.  Returns 0
+ * and rewrites *interrupt on a match; returns 0 with *interrupt left
+ * untouched when the bridge has no map at all; returns -1 when a map
+ * exists but nothing matches (callers then keep the raw value).
+ */
+int __init ebus_intmap_match(struct linux_ebus *ebus,
+			     struct linux_prom_registers *reg,
+			     int *interrupt)
+{
+	unsigned int hi, lo, irq;
+	int i;
+
+	if (!ebus->num_ebus_intmap)
+		return 0;
+
+	/* Mask the lookup key with interrupt-map-mask before comparing. */
+	hi = reg->which_io & ebus->ebus_intmask.phys_hi;
+	lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
+	irq = *interrupt & ebus->ebus_intmask.interrupt;
+	for (i = 0; i < ebus->num_ebus_intmap; i++) {
+		if ((ebus->ebus_intmap[i].phys_hi == hi) &&
+		    (ebus->ebus_intmap[i].phys_lo == lo) &&
+		    (ebus->ebus_intmap[i].interrupt == irq)) {
+			*interrupt = ebus->ebus_intmap[i].cinterrupt;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Populate a linux_ebus_child from its PROM node: name, resources
+ * (raw values for non-standard reg layouts, otherwise inherited from
+ * the parent device), and interrupts (with a PS/2 keyboard/mouse
+ * fallback when the PROM omits the "interrupts" property).
+ */
+void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
+			    struct linux_ebus_child *dev, int non_standard_regs)
+{
+	int regs[PROMREG_MAX];
+	int irqs[PROMREG_MAX];
+	int i, len;
+
+	dev->prom_node = node;
+	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
+	printk(" (%s)", dev->prom_name);
+
+	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
+	dev->num_addrs = len / sizeof(regs[0]);
+
+	if (non_standard_regs) {
+		/* This is to handle reg properties which are not
+		 * in the parent relative format.  One example are
+		 * children of the i2c device on CompactPCI systems.
+		 *
+		 * So, for such devices we just record the property
+		 * raw in the child resources.
+		 */
+		for (i = 0; i < dev->num_addrs; i++)
+			dev->resource[i].start = regs[i];
+	} else {
+		for (i = 0; i < dev->num_addrs; i++) {
+			int rnum = regs[i];
+			if (rnum >= dev->parent->num_addrs) {
+				prom_printf("UGH: property for %s was %d, need < %d\n",
+					    dev->prom_name, len, dev->parent->num_addrs);
+				panic(__FUNCTION__);
+			}
+			/* NOTE(review): rnum is range-checked above but the
+			 * parent resource is then indexed with i, not rnum
+			 * (and the message above prints len, not rnum).
+			 * Looks suspicious -- verify against later trees.
+			 */
+			dev->resource[i].start = dev->parent->resource[i].start;
+			dev->resource[i].end = dev->parent->resource[i].end;
+			dev->resource[i].flags = IORESOURCE_MEM;
+			dev->resource[i].name = dev->prom_name;
+		}
+	}
+
+	for (i = 0; i < PROMINTR_MAX; i++)
+		dev->irqs[i] = PCI_IRQ_NONE;
+
+	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
+	if ((len == -1) || (len == 0)) {
+		dev->num_irqs = 0;
+		/*
+		 * Oh, well, some PROMs don't export interrupts
+		 * property to children of EBus devices...
+		 *
+		 * Be smart about PS/2 keyboard and mouse.
+		 */
+		if (!strcmp(dev->parent->prom_name, "8042")) {
+			if (!strcmp(dev->prom_name, "kb_ps2")) {
+				dev->num_irqs = 1;
+				dev->irqs[0] = dev->parent->irqs[0];
+			} else {
+				dev->num_irqs = 1;
+				dev->irqs[0] = dev->parent->irqs[1];
+			}
+		}
+	} else {
+		dev->num_irqs = len / sizeof(irqs[0]);
+		for (i = 0; i < dev->num_irqs; i++) {
+			struct pci_pbm_info *pbm = dev->bus->parent;
+			struct pci_controller_info *p = pbm->parent;
+
+			if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
+				dev->irqs[i] = p->irq_build(pbm,
+							    dev->bus->self,
+							    irqs[i]);
+			} else {
+				/* If we get a bogus interrupt property, just
+				 * record the raw value instead of punting.
+				 */
+				dev->irqs[i] = irqs[i];
+			}
+		}
+	}
+}
+
+/* Children of "i2c" and "SUNW,lombus" nodes carry raw (non
+ * parent-relative) reg properties; fill_ebus_child() must record them
+ * verbatim.  Returns 1 for such devices, 0 otherwise.
+ */
+static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
+{
+	if (!strcmp(dev->prom_name, "i2c") ||
+	    !strcmp(dev->prom_name, "SUNW,lombus"))
+		return 1;
+	return 0;
+}
+
+/* Populate a linux_ebus_device from its PROM node: name, "reg"
+ * translated into the bridge PCI device's resource space,
+ * "interrupts" built via the PBM's irq_build when the interrupt-map
+ * matches, then all child nodes via fill_ebus_child().
+ */
+void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
+{
+	struct linux_prom_registers regs[PROMREG_MAX];
+	struct linux_ebus_child *child;
+	int irqs[PROMINTR_MAX];
+	int i, n, len;
+
+	dev->prom_node = node;
+	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
+	printk(" [%s", dev->prom_name);
+
+	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
+	if (len == -1) {
+		dev->num_addrs = 0;
+		goto probe_interrupts;
+	}
+
+	/* "reg" must be an array of linux_prom_registers. */
+	if (len % sizeof(struct linux_prom_registers)) {
+		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
+			    dev->prom_name, len,
+			    (int)sizeof(struct linux_prom_registers));
+		prom_halt();
+	}
+	dev->num_addrs = len / sizeof(struct linux_prom_registers);
+
+	for (i = 0; i < dev->num_addrs; i++) {
+		/* XXX Learn how to interpret ebus ranges... -DaveM */
+		if (regs[i].which_io >= 0x10)
+			n = (regs[i].which_io - 0x10) >> 2;
+		else
+			n = regs[i].which_io;
+
+		/* Offset into the chosen PCI BAR of the bridge. */
+		dev->resource[i].start  = dev->bus->self->resource[n].start;
+		dev->resource[i].start += (unsigned long)regs[i].phys_addr;
+		dev->resource[i].end    =
+			(dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
+		dev->resource[i].flags  = IORESOURCE_MEM;
+		dev->resource[i].name   = dev->prom_name;
+		request_resource(&dev->bus->self->resource[n],
+				 &dev->resource[i]);
+	}
+
+probe_interrupts:
+	for (i = 0; i < PROMINTR_MAX; i++)
+		dev->irqs[i] = PCI_IRQ_NONE;
+
+	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
+	if ((len == -1) || (len == 0)) {
+		dev->num_irqs = 0;
+	} else {
+		dev->num_irqs = len / sizeof(irqs[0]);
+		for (i = 0; i < dev->num_irqs; i++) {
+			struct pci_pbm_info *pbm = dev->bus->parent;
+			struct pci_controller_info *p = pbm->parent;
+
+			if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
+				dev->irqs[i] = p->irq_build(pbm,
+							    dev->bus->self,
+							    irqs[i]);
+			} else {
+				/* If we get a bogus interrupt property, just
+				 * record the raw value instead of punting.
+				 */
+				dev->irqs[i] = irqs[i];
+			}
+		}
+	}
+
+	/* Walk all children, chaining them off dev->children. */
+	if ((node = prom_getchild(node))) {
+		printk(" ->");
+		dev->children = ebus_alloc(sizeof(struct linux_ebus_child));
+
+		child = dev->children;
+		child->next = NULL;
+		child->parent = dev;
+		child->bus = dev->bus;
+		fill_ebus_child(node, &regs[0],
+				child, child_regs_nonstandard(dev));
+
+		while ((node = prom_getsibling(node)) != 0) {
+			child->next = ebus_alloc(sizeof(struct linux_ebus_child));
+
+			child = child->next;
+			child->next = NULL;
+			child->parent = dev;
+			child->bus = dev->bus;
+			fill_ebus_child(node, &regs[0],
+					child, child_regs_nonstandard(dev));
+		}
+	}
+	printk("]");
+}
+
+/* Find the next Sun PCI device after @start that is an EBus or RIO
+ * EBus bridge; sets *is_rio_p to 1 for the RIO variant.  Returns NULL
+ * when the PCI device list is exhausted.
+ */
+static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
+{
+	struct pci_dev *pdev = start;
+
+	do {
+		pdev = pci_find_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev);
+		if (pdev &&
+		    (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
+		     pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
+			break;
+	} while (pdev != NULL);
+
+	if (pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
+		*is_rio_p = 1;
+	else
+		*is_rio_p = 0;
+
+	return pdev;
+}
+
+/* Scan all PCI EBus bridges, building the global ebus_chain with each
+ * bridge's devices and children.  Bridges with no PROM children (e.g.
+ * the empty ebuses on SUNW,pci-qfe) are skipped entirely.  Finishes by
+ * probing auxio when configured.
+ */
+void __init ebus_init(void)
+{
+	struct pci_pbm_info *pbm;
+	struct linux_ebus_device *dev;
+	struct linux_ebus *ebus;
+	struct pci_dev *pdev;
+	struct pcidev_cookie *cookie;
+	int nd, ebusnd, is_rio;
+	int num_ebus = 0;
+
+	pdev = find_next_ebus(NULL, &is_rio);
+	if (!pdev) {
+		printk("ebus: No EBus's found.\n");
+		return;
+	}
+
+	cookie = pdev->sysdata;
+	ebusnd = cookie->prom_node;
+
+	ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
+	ebus->next = NULL;
+	ebus->is_rio = is_rio;
+
+	while (ebusnd) {
+		/* SUNW,pci-qfe uses four empty ebuses on it.
+		   I think we should not consider them here,
+		   as they have half of the properties this
+		   code expects and once we do PCI hot-plug,
+		   we'd have to tweak with the ebus_chain
+		   in the runtime after initialization. -jj */
+		if (!prom_getchild (ebusnd)) {
+			pdev = find_next_ebus(pdev, &is_rio);
+			if (!pdev) {
+				/* Nothing real was ever chained. */
+				if (ebus == ebus_chain) {
+					ebus_chain = NULL;
+					printk("ebus: No EBus's found.\n");
+					return;
+				}
+				break;
+			}
+			ebus->is_rio = is_rio;
+			cookie = pdev->sysdata;
+			ebusnd = cookie->prom_node;
+			continue;
+		}
+		printk("ebus%d:", num_ebus);
+
+		prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name));
+		ebus->index = num_ebus;
+		ebus->prom_node = ebusnd;
+		ebus->self = pdev;
+		ebus->parent = pbm = cookie->pbm;
+
+		ebus_ranges_init(ebus);
+		ebus_intmap_init(ebus);
+
+		nd = prom_getchild(ebusnd);
+		if (!nd)
+			goto next_ebus;
+
+		/* Build the device list for this bridge. */
+		ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device));
+
+		dev = ebus->devices;
+		dev->next = NULL;
+		dev->children = NULL;
+		dev->bus = ebus;
+		fill_ebus_device(nd, dev);
+
+		while ((nd = prom_getsibling(nd)) != 0) {
+			dev->next = ebus_alloc(sizeof(struct linux_ebus_device));
+
+			dev = dev->next;
+			dev->next = NULL;
+			dev->children = NULL;
+			dev->bus = ebus;
+			fill_ebus_device(nd, dev);
+		}
+
+	next_ebus:
+		printk("\n");
+
+		pdev = find_next_ebus(pdev, &is_rio);
+		if (!pdev)
+			break;
+
+		cookie = pdev->sysdata;
+		ebusnd = cookie->prom_node;
+
+		ebus->next = ebus_alloc(sizeof(struct linux_ebus));
+		ebus = ebus->next;
+		ebus->next = NULL;
+		ebus->is_rio = is_rio;
+		++num_ebus;
+	}
+
+#ifdef CONFIG_SUN_AUXIO
+	auxio_probe();
+#endif
+}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
new file mode 100644
index 0000000..a47f2d0
--- /dev/null
+++ b/arch/sparc64/kernel/entry.S
@@ -0,0 +1,1919 @@
+/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
+ * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
+ *
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
+ * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/visasm.h>
+#include <asm/estate.h>
+#include <asm/auxio.h>
+
+/* #define SYSCALL_TRACING	1 */
+
+#define curptr      g6
+
+#define NR_SYSCALLS 284      /* Each OS is different... */
+
+	.text
+	.align		32
+
+	.globl		sparc64_vpte_patchme1
+	.globl		sparc64_vpte_patchme2
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP 
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ *
+ * On entry: %g4 holds the faulting virtual address.
+ */
+sparc64_vpte_nucleus:
+	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
+	mov		0xf, %g5
+	sllx		%g5, 28, %g5
+
+	/* Is addr >= LOW_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, sparc64_vpte_patchme1
+	 mov		0x1, %g5
+
+	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
+	sllx		%g5, 32, %g5
+
+	/* Is addr < HI_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_iaddr_patch
+	 nop
+
+	/* These two instructions are patched by paging_init().  */
+sparc64_vpte_patchme1:
+	sethi		%hi(0), %g5
+sparc64_vpte_patchme2:
+	or		%g5, %lo(0), %g5
+
+	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
+	ba,pt		%xcc, sparc64_kpte_continue
+	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
+
+vpte_noent:
+	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
+	 * skip over the trap instruction so that the top level
+	 * TLB miss handler will think this %g5 value is just an
+	 * invalid PTE, thus branching to full fault processing.
+	 * (TAG_ACCESS is addressed as TLB_SFSR + TLB_SFSR here --
+	 *  presumably its ASI offset is 2*TLB_SFSR; confirm in asm/spitfire.h.)
+	 */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_DMMU
+	done
+
+	.globl		obp_iaddr_patch
+obp_iaddr_patch:
+	/* These two instructions patched by inherit_prom_mappings()
+	 * to load the physical base of the OBP page tables into %g5.
+	 */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Behave as if we are at TL0.  */
+	wrpr		%g0, 1, %tl
+	rdpr		%tpc, %g4	/* Find original faulting iaddr */
+	srlx		%g4, 13, %g4	/* Throw out context bits */
+	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
+
+	/* Restore previous TAG_ACCESS.  */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_IMMU
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  A non-negative (top bit clear) value is invalid.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+	.globl		obp_daddr_patch
+obp_daddr_patch:
+	/* These two instructions patched by inherit_prom_mappings().
+	 * Same page-table walk as obp_iaddr_patch above, but for a data
+	 * miss: no TL/TAG_ACCESS fixup needed, and the result is loaded
+	 * into the DTLB instead of the ITLB.
+	 */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ *
+ * On entry: %g4 = faulting virtual address, %g3 + %g6 index the
+ * kernel VPTE area for the vmalloc case below.
+ */
+kvmap:
+	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
+	mov		0xf, %g5
+	sllx		%g5, 28, %g5
+
+	/* Is addr >= LOW_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, vmalloc_addr
+	 mov		0x1, %g5
+
+	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
+	sllx		%g5, 32, %g5
+
+	/* Is addr < HI_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_daddr_patch
+	 nop
+
+vmalloc_addr:
+	/* If we get here, a vmalloc addr accessed, load kernel VPTE.
+	 * A non-negative PTE (top bit clear) is invalid -> full fault path.
+	 */
+	ldxa		[%g3 + %g6] ASI_N, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* PTE is valid, load into TLB and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
+
+	/* This is trivial with the new code... */
+	.globl		do_fpdis
+do_fpdis:
+	/* FPU-disabled trap: re-enable the FPU and restore this thread's
+	 * FP state from thread_info (TI_FPSAVED tells us which halves of
+	 * the register file were saved; unsaved halves are zeroed).
+	 */
+	sethi		%hi(TSTATE_PEF), %g4					! IEU0
+	rdpr		%tstate, %g5
+	andcc		%g5, %g4, %g0
+	be,pt		%xcc, 1f
+	 nop
+	rd		%fprs, %g5
+	andcc		%g5, FPRS_FEF, %g0
+	be,pt		%xcc, 1f
+	 nop
+
+	/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	add		%g0, %g0, %g0
+	ba,a,pt		%xcc, rtrap_clr_l6
+
+1:	ldub		[%g6 + TI_FPSAVED], %g5					! Load	Group
+	wr		%g0, FPRS_FEF, %fprs					! LSU	Group+4bubbles
+	andcc		%g5, FPRS_FEF, %g0					! IEU1	Group
+	be,a,pt		%icc, 1f						! CTI
+	 clr		%g7							! IEU0
+	ldx		[%g6 + TI_GSR], %g7					! Load	Group
+1:	andcc		%g5, FPRS_DL, %g0					! IEU1
+	bne,pn		%icc, 2f						! CTI
+	 fzero		%f0							! FPA
+	andcc		%g5, FPRS_DU, %g0					! IEU1  Group
+	bne,pn		%icc, 1f						! CTI
+	 fzero		%f2							! FPA
+	/* Neither DL nor DU was saved: just zero the whole register file
+	 * via a dependency-free fadd/fmul chain.
+	 */
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+	faddd		%f0, %f2, %f16
+	fmuld		%f0, %f2, %f18
+	faddd		%f0, %f2, %f20
+	fmuld		%f0, %f2, %f22
+	faddd		%f0, %f2, %f24
+	fmuld		%f0, %f2, %f26
+	faddd		%f0, %f2, %f28
+	fmuld		%f0, %f2, %f30
+	faddd		%f0, %f2, %f32
+	fmuld		%f0, %f2, %f34
+	faddd		%f0, %f2, %f36
+	fmuld		%f0, %f2, %f38
+	faddd		%f0, %f2, %f40
+	fmuld		%f0, %f2, %f42
+	faddd		%f0, %f2, %f44
+	fmuld		%f0, %f2, %f46
+	faddd		%f0, %f2, %f48
+	fmuld		%f0, %f2, %f50
+	faddd		%f0, %f2, %f52
+	fmuld		%f0, %f2, %f54
+	faddd		%f0, %f2, %f56
+	fmuld		%f0, %f2, %f58
+	b,pt		%xcc, fpdis_exit2
+	 faddd		%f0, %f2, %f60
+	/* Only the upper half (%f32-%f62) was saved: reload it from
+	 * thread_info and zero the lower half.
+	 */
+1:	mov		SECONDARY_CONTEXT, %g3
+	add		%g6, TI_FPREGS + 0x80, %g1
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_1:
+	/* sethi patched at boot on Cheetah+ (see cheetah_plus_patch_fpdis). */
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS + 0xc0, %g2
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+	ldda		[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g2] ASI_BLK_S, %f48
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+	faddd		%f0, %f2, %f16
+	fmuld		%f0, %f2, %f18
+	faddd		%f0, %f2, %f20
+	fmuld		%f0, %f2, %f22
+	faddd		%f0, %f2, %f24
+	fmuld		%f0, %f2, %f26
+	faddd		%f0, %f2, %f28
+	fmuld		%f0, %f2, %f30
+	b,pt		%xcc, fpdis_exit
+	 membar		#Sync
+	/* Lower half was saved; if the upper half was saved too, go
+	 * restore everything at 3f.  Otherwise reload the lower half
+	 * and zero the upper half.
+	 */
+2:	andcc		%g5, FPRS_DU, %g0
+	bne,pt		%icc, 3f
+	 fzero		%f32
+	mov		SECONDARY_CONTEXT, %g3
+	fzero		%f34
+	ldxa		[%g3] ASI_DMMU, %g5
+	add		%g6, TI_FPREGS, %g1
+cplus_fptrap_insn_2:
+	/* sethi patched at boot on Cheetah+ (see cheetah_plus_patch_fpdis). */
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS + 0x40, %g2
+	faddd		%f32, %f34, %f36
+	fmuld		%f32, %f34, %f38
+	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g2] ASI_BLK_S, %f16
+	faddd		%f32, %f34, %f40
+	fmuld		%f32, %f34, %f42
+	faddd		%f32, %f34, %f44
+	fmuld		%f32, %f34, %f46
+	faddd		%f32, %f34, %f48
+	fmuld		%f32, %f34, %f50
+	faddd		%f32, %f34, %f52
+	fmuld		%f32, %f34, %f54
+	faddd		%f32, %f34, %f56
+	fmuld		%f32, %f34, %f58
+	faddd		%f32, %f34, %f60
+	fmuld		%f32, %f34, %f62
+	ba,pt		%xcc, fpdis_exit
+	 membar		#Sync
+	/* Both halves saved: reload all 64 FP registers from thread_info. */
+3:	mov		SECONDARY_CONTEXT, %g3
+	add		%g6, TI_FPREGS, %g1
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_3:
+	/* sethi patched at boot on Cheetah+ (see cheetah_plus_patch_fpdis). */
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	mov		0x40, %g2
+	ldda		[%g1] ASI_BLK_S, %f0		! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g1 + %g2] ASI_BLK_S, %f16
+	add		%g1, 0x80, %g1
+	ldda		[%g1] ASI_BLK_S, %f32
+	ldda		[%g1 + %g2] ASI_BLK_S, %f48
+	membar		#Sync
+fpdis_exit:
+	/* Restore the secondary context saved in %g5 above.  */
+	stxa		%g5, [%g3] ASI_DMMU
+	membar		#Sync
+fpdis_exit2:
+	wr		%g7, 0, %gsr
+	ldx		[%g6 + TI_XFSR], %fsr
+	rdpr		%tstate, %g3
+	or		%g3, %g4, %g3		! anal...
+	wrpr		%g3, %tstate
+	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
+	retry
+
+	.align		32
+fp_other_bounce:
+	/* Generic fp-other trap: hand off to the C handler.  */
+	call		do_fpother
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_fpother_check_fitos
+	.align		32
+do_fpother_check_fitos:
+	/* fp-other trap entry.  If the trap was an "unfinished FP-op"
+	 * caused by a user-mode fitos instruction, emulate it here as
+	 * fitod+fdtos via %f62; otherwise bounce to do_fptrap_after_fsr
+	 * / fp_other_bounce for full C handling.
+	 */
+	sethi		%hi(fp_other_bounce - 4), %g7
+	or		%g7, %lo(fp_other_bounce - 4), %g7
+
+	/* NOTE: Need to preserve %g7 until we fully commit
+	 *       to the fitos fixup.
+	 */
+	stx		%fsr, [%g6 + TI_XFSR]
+	rdpr		%tstate, %g3
+	andcc		%g3, TSTATE_PRIV, %g0
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 nop
+	ldx		[%g6 + TI_XFSR], %g3
+	srlx		%g3, 14, %g1
+	and		%g1, 7, %g1
+	cmp		%g1, 2			! Unfinished FP-OP
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 sethi		%hi(1 << 23), %g1	! Inexact
+	andcc		%g3, %g1, %g0
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 rdpr		%tpc, %g1
+	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail
+#define FITOS_MASK	0xc1f83fe0
+#define FITOS_COMPARE	0x81a01880
+	sethi		%hi(FITOS_MASK), %g1
+	or		%g1, %lo(FITOS_MASK), %g1
+	and		%g3, %g1, %g1
+	sethi		%hi(FITOS_COMPARE), %g2
+	or		%g2, %lo(FITOS_COMPARE), %g2
+	cmp		%g1, %g2
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 nop
+	std		%f62, [%g6 + TI_FPREGS + (62 * 4)]
+	sethi		%hi(fitos_table_1), %g1
+	and		%g3, 0x1f, %g2
+	or		%g1, %lo(fitos_table_1),  %g1
+	sllx		%g2, 2, %g2
+	jmpl		%g1 + %g2, %g0
+	 ba,pt		%xcc, fitos_emul_continue
+
+	/* Jump table indexed by the instruction's rs2 field:
+	 * entry i performs fitod %f<i>, %f62.
+	 */
+fitos_table_1:
+	fitod		%f0, %f62
+	fitod		%f1, %f62
+	fitod		%f2, %f62
+	fitod		%f3, %f62
+	fitod		%f4, %f62
+	fitod		%f5, %f62
+	fitod		%f6, %f62
+	fitod		%f7, %f62
+	fitod		%f8, %f62
+	fitod		%f9, %f62
+	fitod		%f10, %f62
+	fitod		%f11, %f62
+	fitod		%f12, %f62
+	fitod		%f13, %f62
+	fitod		%f14, %f62
+	fitod		%f15, %f62
+	fitod		%f16, %f62
+	fitod		%f17, %f62
+	fitod		%f18, %f62
+	fitod		%f19, %f62
+	fitod		%f20, %f62
+	fitod		%f21, %f62
+	fitod		%f22, %f62
+	fitod		%f23, %f62
+	fitod		%f24, %f62
+	fitod		%f25, %f62
+	fitod		%f26, %f62
+	fitod		%f27, %f62
+	fitod		%f28, %f62
+	fitod		%f29, %f62
+	fitod		%f30, %f62
+	fitod		%f31, %f62
+
+fitos_emul_continue:
+	sethi		%hi(fitos_table_2), %g1
+	srl		%g3, 25, %g2
+	or		%g1, %lo(fitos_table_2), %g1
+	and		%g2, 0x1f, %g2
+	sllx		%g2, 2, %g2
+	jmpl		%g1 + %g2, %g0
+	 ba,pt		%xcc, fitos_emul_fini
+
+	/* Jump table indexed by the instruction's rd field:
+	 * entry i performs fdtos %f62, %f<i>.
+	 */
+fitos_table_2:
+	fdtos		%f62, %f0
+	fdtos		%f62, %f1
+	fdtos		%f62, %f2
+	fdtos		%f62, %f3
+	fdtos		%f62, %f4
+	fdtos		%f62, %f5
+	fdtos		%f62, %f6
+	fdtos		%f62, %f7
+	fdtos		%f62, %f8
+	fdtos		%f62, %f9
+	fdtos		%f62, %f10
+	fdtos		%f62, %f11
+	fdtos		%f62, %f12
+	fdtos		%f62, %f13
+	fdtos		%f62, %f14
+	fdtos		%f62, %f15
+	fdtos		%f62, %f16
+	fdtos		%f62, %f17
+	fdtos		%f62, %f18
+	fdtos		%f62, %f19
+	fdtos		%f62, %f20
+	fdtos		%f62, %f21
+	fdtos		%f62, %f22
+	fdtos		%f62, %f23
+	fdtos		%f62, %f24
+	fdtos		%f62, %f25
+	fdtos		%f62, %f26
+	fdtos		%f62, %f27
+	fdtos		%f62, %f28
+	fdtos		%f62, %f29
+	fdtos		%f62, %f30
+	fdtos		%f62, %f31
+
+fitos_emul_fini:
+	/* Restore the %f62 scratch register saved above and return.  */
+	ldd		[%g6 + TI_FPREGS + (62 * 4)], %f62
+	done
+
+	.globl		do_fptrap
+	.align		32
+do_fptrap:
+	/* Save the thread's FP state (FSR, FPRS, GSR and whichever
+	 * register halves are live per FPRS_DL/DU) into thread_info,
+	 * then enter the kernel via etrap with the FPU disabled.
+	 */
+	stx		%fsr, [%g6 + TI_XFSR]
+do_fptrap_after_fsr:
+	ldub		[%g6 + TI_FPSAVED], %g3
+	rd		%fprs, %g1
+	or		%g3, %g1, %g3
+	stb		%g3, [%g6 + TI_FPSAVED]
+	rd		%gsr, %g3
+	stx		%g3, [%g6 + TI_GSR]
+	mov		SECONDARY_CONTEXT, %g3
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_4:
+	/* sethi patched at boot on Cheetah+ (see cheetah_plus_patch_fpdis). */
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS, %g2
+	andcc		%g1, FPRS_DL, %g0
+	be,pn		%icc, 4f
+	 mov		0x40, %g3
+	stda		%f0, [%g2] ASI_BLK_S
+	stda		%f16, [%g2 + %g3] ASI_BLK_S
+	andcc		%g1, FPRS_DU, %g0
+	be,pn		%icc, 5f
+4:       add		%g2, 128, %g2
+	stda		%f32, [%g2] ASI_BLK_S
+	stda		%f48, [%g2 + %g3] ASI_BLK_S
+5:	mov		SECONDARY_CONTEXT, %g1
+	membar		#Sync
+	stxa		%g5, [%g1] ASI_DMMU
+	membar		#Sync
+	ba,pt		%xcc, etrap
+	 wr		%g0, 0, %fprs
+
+cplus_fptrap_1:
+	/* Template instruction copied over the cplus_fptrap_insn_* slots. */
+	sethi		%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+	.globl		cheetah_plus_patch_fpdis
+cheetah_plus_patch_fpdis:
+	/* We configure the dTLB512_0 for 4MB pages and the
+	 * dTLB512_1 for 8K pages when in context zero.
+	 * NOTE(review): the comment above looks copy-pasted from the TLB
+	 * configuration code; what this routine actually does is patch the
+	 * four cplus_fptrap_insn_* sethi slots with the cplus_fptrap_1
+	 * template so the FP save/restore paths use CTX_CHEETAH_PLUS_CTX0.
+	 */
+	sethi			%hi(cplus_fptrap_1), %o0
+	lduw			[%o0 + %lo(cplus_fptrap_1)], %o1
+
+	set			cplus_fptrap_insn_1, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_2, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_3, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_4, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+
+	retl
+	 nop
+
+	/* The registers for cross calls will be:
+	 *
+	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
+	 *         [high 32-bits] MMU Context Argument 0, place in %g5
+	 * DATA 1: Address Argument 1, place in %g6
+	 * DATA 2: Address Argument 2, place in %g7
+	 *
+	 * With this method we can do most of the cross-call tlb/cache
+	 * flushing very quickly.
+	 *
+	 * Current CPU's IRQ worklist table is locked into %g1,
+	 * don't touch.
+	 */
+	.text
+	.align		32
+	.globl		do_ivec
+do_ivec:
+	/* Interrupt vector trap.  Data word 0 below KERNBASE is a device
+	 * interrupt number; at or above KERNBASE it is a cross-call
+	 * function address (see register layout comment above).
+	 */
+	mov		0x40, %g3
+	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
+	sethi		%hi(KERNBASE), %g4
+	cmp		%g3, %g4
+	bgeu,pn		%xcc, do_ivec_xcall
+	 srlx		%g3, 32, %g5
+	stxa		%g0, [%g0] ASI_INTR_RECEIVE
+	membar		#Sync
+
+	sethi		%hi(ivector_table), %g2
+	sllx		%g3, 5, %g3
+	or		%g2, %lo(ivector_table), %g2
+	add		%g2, %g3, %g3
+	ldx		[%g3 + 0x08], %g2	/* irq_info */
+	ldub		[%g3 + 0x04], %g4	/* pil */
+	brz,pn		%g2, do_ivec_spurious
+	 mov		1, %g2
+
+	/* Queue the bucket on this cpu/pil worklist and post the softint. */
+	sllx		%g2, %g4, %g2
+	sllx		%g4, 2, %g4
+	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
+	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
+	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
+	wr		%g2, 0x0, %set_softint
+	retry
+do_ivec_xcall:
+	/* Cross-call: fetch args from data words 1 and 2, then jump to
+	 * the function address in the low 32 bits of data word 0.
+	 */
+	mov		0x50, %g1
+
+	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
+	srl		%g3, 0, %g3
+	mov		0x60, %g7
+	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
+	stxa		%g0, [%g0] ASI_INTR_RECEIVE
+	membar		#Sync
+	ba,pt		%xcc, 1f
+	 nop
+
+	.align		32
+1:	jmpl		%g3, %g0
+	 nop
+
+do_ivec_spurious:
+	stw		%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
+	rdpr		%pstate, %g5
+
+	wrpr		%g5, PSTATE_IG | PSTATE_AG, %pstate
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	call		catch_disabled_ivec
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		save_alternate_globals
+save_alternate_globals: /* %o0 = save_area */
+	/* Spill the AG, IG and MG global register sets (8 regs each,
+	 * 0xc0 bytes total) to the save area with interrupts disabled.
+	 * The %g0 slots are kept so each set occupies a uniform 0x40 bytes.
+	 */
+	rdpr		%pstate, %o5
+	andn		%o5, PSTATE_IE, %o1
+	wrpr		%o1, PSTATE_AG, %pstate
+	stx		%g0, [%o0 + 0x00]
+	stx		%g1, [%o0 + 0x08]
+	stx		%g2, [%o0 + 0x10]
+	stx		%g3, [%o0 + 0x18]
+	stx		%g4, [%o0 + 0x20]
+	stx		%g5, [%o0 + 0x28]
+	stx		%g6, [%o0 + 0x30]
+	stx		%g7, [%o0 + 0x38]
+	wrpr		%o1, PSTATE_IG, %pstate
+	stx		%g0, [%o0 + 0x40]
+	stx		%g1, [%o0 + 0x48]
+	stx		%g2, [%o0 + 0x50]
+	stx		%g3, [%o0 + 0x58]
+	stx		%g4, [%o0 + 0x60]
+	stx		%g5, [%o0 + 0x68]
+	stx		%g6, [%o0 + 0x70]
+	stx		%g7, [%o0 + 0x78]
+	wrpr		%o1, PSTATE_MG, %pstate
+	stx		%g0, [%o0 + 0x80]
+	stx		%g1, [%o0 + 0x88]
+	stx		%g2, [%o0 + 0x90]
+	stx		%g3, [%o0 + 0x98]
+	stx		%g4, [%o0 + 0xa0]
+	stx		%g5, [%o0 + 0xa8]
+	stx		%g6, [%o0 + 0xb0]
+	stx		%g7, [%o0 + 0xb8]
+	wrpr		%o5, 0x0, %pstate
+	retl
+	 nop
+
+	.globl		restore_alternate_globals
+restore_alternate_globals: /* %o0 = save_area */
+	/* Inverse of save_alternate_globals: reload AG, IG and MG
+	 * register sets from the save area.
+	 */
+	rdpr		%pstate, %o5
+	andn		%o5, PSTATE_IE, %o1
+	wrpr		%o1, PSTATE_AG, %pstate
+	ldx		[%o0 + 0x00], %g0
+	ldx		[%o0 + 0x08], %g1
+	ldx		[%o0 + 0x10], %g2
+	ldx		[%o0 + 0x18], %g3
+	ldx		[%o0 + 0x20], %g4
+	ldx		[%o0 + 0x28], %g5
+	ldx		[%o0 + 0x30], %g6
+	ldx		[%o0 + 0x38], %g7
+	wrpr		%o1, PSTATE_IG, %pstate
+	ldx		[%o0 + 0x40], %g0
+	ldx		[%o0 + 0x48], %g1
+	ldx		[%o0 + 0x50], %g2
+	ldx		[%o0 + 0x58], %g3
+	ldx		[%o0 + 0x60], %g4
+	ldx		[%o0 + 0x68], %g5
+	ldx		[%o0 + 0x70], %g6
+	ldx		[%o0 + 0x78], %g7
+	wrpr		%o1, PSTATE_MG, %pstate
+	ldx		[%o0 + 0x80], %g0
+	ldx		[%o0 + 0x88], %g1
+	ldx		[%o0 + 0x90], %g2
+	ldx		[%o0 + 0x98], %g3
+	ldx		[%o0 + 0xa0], %g4
+	ldx		[%o0 + 0xa8], %g5
+	ldx		[%o0 + 0xb0], %g6
+	ldx		[%o0 + 0xb8], %g7
+	wrpr		%o5, 0x0, %pstate
+	retl
+	 nop
+
+	.globl		getcc, setcc
+getcc:
+	/* %o0 = pt_regs.  Extract the condition-code nibble from the
+	 * saved TSTATE (bits 32..35) into the saved %g1 slot.
+	 */
+	ldx		[%o0 + PT_V9_TSTATE], %o1
+	srlx		%o1, 32, %o1
+	and		%o1, 0xf, %o1
+	retl
+	 stx		%o1, [%o0 + PT_V9_G1]
+setcc:
+	/* %o0 = pt_regs.  Inverse of getcc: splice the value from the
+	 * saved %g1 slot into the TSTATE_ICC field of the saved TSTATE.
+	 */
+	ldx		[%o0 + PT_V9_TSTATE], %o1
+	ldx		[%o0 + PT_V9_G1], %o2
+	or		%g0, %ulo(TSTATE_ICC), %o3
+	sllx		%o3, 32, %o3
+	andn		%o1, %o3, %o1
+	sllx		%o2, 32, %o2
+	and		%o2, %o3, %o2
+	or		%o1, %o2, %o1
+	retl
+	 stx		%o1, [%o0 + PT_V9_TSTATE]
+
+	.globl		utrap, utrap_ill
+utrap:	brz,pn		%g1, etrap
+	 nop
+	/* Dispatch to the user trap handler in %g1: open a new register
+	 * window, pin the current CWP into TSTATE, and redirect %tnpc so
+	 * `done` resumes at the handler.
+	 */
+	save		%sp, -128, %sp
+	rdpr		%tstate, %l6
+	rdpr		%cwp, %l7
+	andn		%l6, TSTATE_CWP, %l6
+	wrpr		%l6, %l7, %tstate
+	rdpr		%tpc, %l6
+	rdpr		%tnpc, %l7
+	wrpr		%g1, 0, %tnpc
+	done
+utrap_ill:
+	/* Illegal user trap: report via bad_trap and return to userland. */
+        call		bad_trap
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+#ifdef CONFIG_BLK_DEV_FD
+	.globl		floppy_hardint
+floppy_hardint:
+	/* Floppy pseudo-DMA interrupt: shovel bytes between the FDC data
+	 * port (fdc_status + 1) and the pdma buffer until the FIFO drains,
+	 * the transfer completes, or an overrun is seen; otherwise fall
+	 * through to the soft interrupt path which calls the C handler.
+	 */
+	wr		%g0, (1 << 11), %clear_softint
+	sethi		%hi(doing_pdma), %g1
+	ld		[%g1 + %lo(doing_pdma)], %g2
+	brz,pn		%g2, floppy_dosoftint
+	 sethi		%hi(fdc_status), %g3
+	ldx		[%g3 + %lo(fdc_status)], %g3
+	sethi		%hi(pdma_vaddr), %g5
+	ldx		[%g5 + %lo(pdma_vaddr)], %g4
+	sethi		%hi(pdma_size), %g5
+	ldx		[%g5 + %lo(pdma_size)], %g5
+
+next_byte:
+	/* Status bits: 0x80 = request, 0x20 = busy, 0x40 = direction. */
+	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
+	andcc		%g7, 0x80, %g0
+	be,pn		%icc, floppy_fifo_emptied
+	 andcc		%g7, 0x20, %g0
+	be,pn		%icc, floppy_overrun
+	 andcc		%g7, 0x40, %g0
+	be,pn		%icc, floppy_write
+	 sub		%g5, 1, %g5
+
+	inc		%g3
+	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
+	dec		%g3
+	orcc		%g0, %g5, %g0
+	stb		%g7, [%g4]
+	bne,pn		%xcc, next_byte
+	 add		%g4, 1, %g4
+
+	b,pt		%xcc, floppy_tdone
+	 nop
+
+floppy_write:
+	ldub		[%g4], %g7
+	orcc		%g0, %g5, %g0
+	inc		%g3
+	stba		%g7, [%g3] ASI_PHYS_BYPASS_EC_E
+	dec		%g3
+	bne,pn		%xcc, next_byte
+	 add		%g4, 1, %g4
+
+floppy_tdone:
+	/* Transfer done: save pdma cursor/size and pulse AUX1 FTCNT. */
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(auxio_register), %g1
+	ldx		[%g1 + %lo(auxio_register)], %g7
+	lduba		[%g7] ASI_PHYS_BYPASS_EC_E, %g5
+	or		%g5, AUXIO_AUX1_FTCNT, %g5
+/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
+	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
+	andn		%g5, AUXIO_AUX1_FTCNT, %g5
+/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
+
+	/* Presumably a settle delay so the FTCNT pulse is seen -- TODO confirm. */
+	nop; nop;  nop; nop;  nop; nop;
+	nop; nop;  nop; nop;  nop; nop;
+
+	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
+	sethi		%hi(doing_pdma), %g1
+	b,pt		%xcc, floppy_dosoftint
+	 st		%g0, [%g1 + %lo(doing_pdma)]
+
+floppy_fifo_emptied:
+	/* FIFO drained but transfer not finished: just ack the interrupt
+	 * (write ICLR_IDLE) and return without raising the softint.
+	 */
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(irq_action), %g1
+	or		%g1, %lo(irq_action), %g1
+	ldx		[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
+	ldx		[%g3 + 0x08], %g4		! action->flags>>48==ino
+	sethi		%hi(ivector_table), %g3
+	srlx		%g4, 48, %g4
+	or		%g3, %lo(ivector_table), %g3
+	sllx		%g4, 5, %g4
+	ldx		[%g3 + %g4], %g4		! &ivector_table[ino]
+	ldx		[%g4 + 0x10], %g4		! bucket->iclr
+	stwa		%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
+	membar		#Sync				! probably not needed...
+	retry
+
+floppy_overrun:
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(doing_pdma), %g1
+	st		%g0, [%g1 + %lo(doing_pdma)]
+
+floppy_dosoftint:
+	/* Enter the kernel at PIL 15 and hand off to the C floppy
+	 * interrupt handler (irq 11).
+	 */
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+
+	mov		11, %o0
+	mov		0, %o1
+	call		sparc_floppy_irq
+	 add		%sp, PTREGS_OFF, %o2
+
+	b,pt		%xcc, rtrap_irq
+	 nop
+
+#endif /* CONFIG_BLK_DEV_FD */
+
+	/* XXX Here is stuff we still need to write... -DaveM XXX */
+	.globl		netbsd_syscall
+netbsd_syscall:
+	/* Unimplemented: NetBSD compat syscall entry is a no-op stub. */
+	retl
+	 nop
+
+	/* These next few routines must be sure to clear the
+	 * SFSR FaultValid bit so that the fast tlb data protection
+	 * handler does not flush the wrong context and lock up the
+	 * box.
+	 */
+	.globl		__do_data_access_exception
+	.globl		__do_data_access_exception_tl1
+__do_data_access_exception_tl1:
+	/* TL>1 variant: capture SFSR/SFAR, clear FaultValid, and let
+	 * the window fixup code (winfix_dax) sort out the trap PC.
+	 */
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
+	membar		#Sync
+	ba,pt		%xcc, winfix_dax
+	 rdpr		%tpc, %g3
+__do_data_access_exception:
+	/* TL1 entry: capture SFSR/SFAR, clear FaultValid, then call the
+	 * C handler data_access_exception(regs, sfsr, sfar).
+	 */
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		data_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		__do_instruction_access_exception
+	.globl		__do_instruction_access_exception_tl1
+__do_instruction_access_exception_tl1:
+	/* NOTE(review): both variants read SFSR/SFAR through ASI_DMMU
+	 * but clear FaultValid through ASI_IMMU -- verify this is the
+	 * intended register pair for instruction access faults.
+	 */
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etraptl1
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		instruction_access_exception_tl1
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+__do_instruction_access_exception:
+	/* TL1 entry: capture fault state, clear FaultValid, then call
+	 * instruction_access_exception(regs, sfsr, sfar).
+	 */
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		instruction_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	/* This is the trap handler entry point for ECC correctable
+	 * errors.  They are corrected, but we listen for the trap
+	 * so that the event can be logged.
+	 *
+	 * Disrupting errors are either:
+	 * 1) single-bit ECC errors during UDB reads to system
+	 *    memory
+	 * 2) data parity errors during write-back events
+	 *
+	 * As far as I can make out from the manual, the CEE trap
+	 * is only for correctable errors during memory read
+	 * accesses by the front-end of the processor.
+	 *
+	 * The code below is only for trap level 1 CEE events,
+	 * as it is the only situation where we can safely record
+	 * and log.  For trap level >1 we just clear the CE bit
+	 * in the AFSR and return.
+	 */
+
+	/* Our trap handling infrastructure allows us to preserve
+	 * two 64-bit values during etrap for arguments to
+	 * subsequent C code.  Therefore we encode the information
+	 * as follows:
+	 *
+	 * value 1) Full 64-bits of AFAR
+	 * value 2) Low 33-bits of AFSR, then bits 33-->42
+	 *          are UDBL error status and bits 43-->52
+	 *          are UDBH error status
+	 */
+	.align	64
+	.globl	cee_trap
+cee_trap:
+	ldxa	[%g0] ASI_AFSR, %g1		! Read AFSR
+	ldxa	[%g0] ASI_AFAR, %g2		! Read AFAR
+	sllx	%g1, 31, %g1			! Clear reserved bits
+	srlx	%g1, 31, %g1			! in AFSR
+
+	/* NOTE: UltraSparc-I/II have high and low UDB error
+	 *       registers, corresponding to the two UDB units
+	 *       present on those chips.  UltraSparc-IIi only
+	 *       has a single UDB, called "SDB" in the manual.
+	 *       For IIi the upper UDB register always reads
+	 *       as zero so for our purposes things will just
+	 *       work with the checks below.
+	 */
+	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
+	andcc	%g3, (1 << 8), %g4		! Check CE bit
+	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
+	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status
+
+	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
+	or	%g1, %g3, %g1			! Or it in
+	be,pn	%xcc, 1f			! Branch if CE bit was clear
+	 nop
+	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
+	membar	#Sync				! Synchronize ASI stores
+1:	mov	0x18, %g5			! Addr of UDB-High error status
+	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it
+
+	andcc	%g3, (1 << 8), %g4		! Check CE bit
+	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
+	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
+	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
+	or	%g1, %g3, %g1			! Or it in
+	be,pn	%xcc, 1f			! Branch if CE bit was clear
+	 nop
+	nop					! (stray extra delay nop)
+
+	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
+	membar	#Sync				! Synchronize ASI stores
+1:	mov	1, %g5				! AFSR CE bit is
+	sllx	%g5, 20, %g5			! bit 20
+	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
+	membar	#Sync				! Synchronize ASI stores
+	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
+	srlx	%g2, (64 - 41), %g2		! in latched AFAR
+
+	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
+	mov	%g1, %g4			! Move AFSR+UDB* into save reg
+	mov	%g2, %g5			! Move AFAR into save reg
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+	mov	%l4, %o0
+
+	mov	%l5, %o1
+	call	cee_log
+	 add	%sp, PTREGS_OFF, %o2
+	ba,a,pt	%xcc, rtrap_irq
+
+	/* Capture I/D/E-cache state into per-cpu error scoreboard.
+	 *
+	 * %g1:		(TL>=0) ? 1 : 0
+	 * %g2:		scratch
+	 * %g3:		scratch
+	 * %g4:		AFSR
+	 * %g5:		AFAR
+	 * %g6:		current thread ptr
+	 * %g7:		scratch
+	 *
+	 * The macro locates this cpu's cheetah_error_log slot (cpu id read
+	 * from SAFARI or, on Jalapeno, JBUS config), records AFSR/AFAR if
+	 * the slot is free (-1), then snapshots the D-cache, I-cache and
+	 * E-cache lines matching AFAR.  Falls through to label 80 when the
+	 * log is absent or the slot is already occupied.
+	 */
+#define CHEETAH_LOG_ERROR						\
+	/* Put "TL1" software bit into AFSR. */				\
+	and		%g1, 0x1, %g1;					\
+	sllx		%g1, 63, %g2;					\
+	or		%g4, %g2, %g4;					\
+	/* Get log entry pointer for this cpu at this trap level. */	\
+	BRANCH_IF_JALAPENO(g2,g3,50f)					\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2;			\
+	srlx		%g2, 17, %g2;					\
+	ba,pt		%xcc, 60f; 					\
+	 and		%g2, 0x3ff, %g2;				\
+50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2;			\
+	srlx		%g2, 17, %g2;					\
+	and		%g2, 0x1f, %g2;					\
+60:	sllx		%g2, 9, %g2;					\
+	sethi		%hi(cheetah_error_log), %g3;			\
+	ldx		[%g3 + %lo(cheetah_error_log)], %g3;		\
+	brz,pn		%g3, 80f;					\
+	 nop;								\
+	add		%g3, %g2, %g3;					\
+	sllx		%g1, 8, %g1;					\
+	add		%g3, %g1, %g1;					\
+	/* %g1 holds pointer to the top of the logging scoreboard */	\
+	ldx		[%g1 + 0x0], %g7;				\
+	cmp		%g7, -1;					\
+	bne,pn		%xcc, 80f;					\
+	 nop;								\
+	stx		%g4, [%g1 + 0x0];				\
+	stx		%g5, [%g1 + 0x8];				\
+	add		%g1, 0x10, %g1;					\
+	/* %g1 now points to D-cache logging area */			\
+	set		0x3ff8, %g2;	/* DC_addr mask		*/	\
+	and		%g5, %g2, %g2;	/* DC_addr bits of AFAR	*/	\
+	srlx		%g5, 12, %g3;					\
+	or		%g3, 1, %g3;	/* PHYS tag + valid	*/	\
+10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7;			\
+	cmp		%g3, %g7;	/* TAG match?		*/	\
+	bne,pt		%xcc, 13f;					\
+	 nop;								\
+	/* Yep, what we want, capture state. */				\
+	stx		%g2, [%g1 + 0x20];				\
+	stx		%g7, [%g1 + 0x28];				\
+	/* A membar Sync is required before and after utag access. */	\
+	membar		#Sync;						\
+	ldxa		[%g2] ASI_DCACHE_UTAG, %g7;			\
+	membar		#Sync;						\
+	stx		%g7, [%g1 + 0x30];				\
+	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7;		\
+	stx		%g7, [%g1 + 0x38];				\
+	clr		%g3;						\
+12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7;		\
+	stx		%g7, [%g1];					\
+	add		%g3, (1 << 5), %g3;				\
+	cmp		%g3, (4 << 5);					\
+	bl,pt		%xcc, 12b;					\
+	 add		%g1, 0x8, %g1;					\
+	ba,pt		%xcc, 20f;					\
+	 add		%g1, 0x20, %g1;					\
+13:	sethi		%hi(1 << 14), %g7;				\
+	add		%g2, %g7, %g2;					\
+	srlx		%g2, 14, %g7;					\
+	cmp		%g7, 4;						\
+	bl,pt		%xcc, 10b;					\
+	 nop;								\
+	add		%g1, 0x40, %g1;					\
+20:	/* %g1 now points to I-cache logging area */			\
+	set		0x1fe0, %g2;	/* IC_addr mask		*/	\
+	and		%g5, %g2, %g2;	/* IC_addr bits of AFAR	*/	\
+	sllx		%g2, 1, %g2;	/* IC_addr[13:6]==VA[12:5] */	\
+	srlx		%g5, (13 - 8), %g3; /* Make PTAG */		\
+	andn		%g3, 0xff, %g3;	/* Mask off undefined bits */	\
+21:	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	andn		%g7, 0xff, %g7;					\
+	cmp		%g3, %g7;					\
+	bne,pt		%xcc, 23f;					\
+	 nop;								\
+	/* Yep, what we want, capture state. */				\
+	stx		%g2, [%g1 + 0x40];				\
+	stx		%g7, [%g1 + 0x48];				\
+	add		%g2, (1 << 3), %g2;				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	add		%g2, (1 << 3), %g2;				\
+	stx		%g7, [%g1 + 0x50];				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	add		%g2, (1 << 3), %g2;				\
+	stx		%g7, [%g1 + 0x60];				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	stx		%g7, [%g1 + 0x68];				\
+	sub		%g2, (3 << 3), %g2;				\
+	ldxa		[%g2] ASI_IC_STAG, %g7;				\
+	stx		%g7, [%g1 + 0x58];				\
+	clr		%g3;						\
+	srlx		%g2, 2, %g2;					\
+22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7;			\
+	stx		%g7, [%g1];					\
+	add		%g3, (1 << 3), %g3;				\
+	cmp		%g3, (8 << 3);					\
+	bl,pt		%xcc, 22b;					\
+	 add		%g1, 0x8, %g1;					\
+	ba,pt		%xcc, 30f;					\
+	 add		%g1, 0x30, %g1;					\
+23:	sethi		%hi(1 << 14), %g7;				\
+	add		%g2, %g7, %g2;					\
+	srlx		%g2, 14, %g7;					\
+	cmp		%g7, 4;						\
+	bl,pt		%xcc, 21b;					\
+	 nop;								\
+	add		%g1, 0x70, %g1;					\
+30:	/* %g1 now points to E-cache logging area */			\
+	andn		%g5, (32 - 1), %g2;	/* E-cache subblock */	\
+	stx		%g2, [%g1 + 0x20];				\
+	ldxa		[%g2] ASI_EC_TAG_DATA, %g7;			\
+	stx		%g7, [%g1 + 0x28];				\
+	ldxa		[%g2] ASI_EC_R, %g0;				\
+	clr		%g3;						\
+31:	ldxa		[%g3] ASI_EC_DATA, %g7;				\
+	stx		%g7, [%g1 + %g3];				\
+	add		%g3, 0x8, %g3;					\
+	cmp		%g3, 0x20;					\
+	bl,pt		%xcc, 31b;					\
+	 nop;								\
+80:	/* DONE */
+
+	/* These get patched into the trap table at boot time
+	 * once we know we have a cheetah processor.
+	 *
+	 * Each vector disables the D-cache and/or I-cache (so the error
+	 * state is not disturbed) and jumps to the real handler with
+	 * %g1 = 0 for the TL0 entry, %g1 = 1 for the TL1 entry.
+	 */
+	.globl		cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
+cheetah_fecc_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_DC | DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_fast_ecc), %g2
+	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
+	 mov		0, %g1
+cheetah_fecc_trap_vector_tl1:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_DC | DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_fast_ecc), %g2
+	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
+	 mov		1, %g1
+	.globl	cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
+cheetah_cee_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_cee), %g2
+	jmpl		%g2 + %lo(cheetah_cee), %g0
+	 mov		0, %g1
+cheetah_cee_trap_vector_tl1:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_cee), %g2
+	jmpl		%g2 + %lo(cheetah_cee), %g0
+	 mov		1, %g1
+	.globl	cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
+cheetah_deferred_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
+	andn		%g1, DCU_DC | DCU_IC, %g1;
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
+	membar		#Sync;
+	sethi		%hi(cheetah_deferred_trap), %g2
+	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
+	 mov		0, %g1
+cheetah_deferred_trap_vector_tl1:
+	membar		#Sync;
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
+	andn		%g1, DCU_DC | DCU_IC, %g1;
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
+	membar		#Sync;
+	sethi		%hi(cheetah_deferred_trap), %g2
+	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
+	 mov		1, %g1
+
+	/* Cheetah+ specific traps. These are for the new I/D cache parity
+	 * error traps.  The first argument to cheetah_plus_parity_handler
+	 * is encoded as follows:
+	 *
+	 * Bit0:	0=dcache,1=icache
+	 * Bit1:	0=recoverable,1=unrecoverable
+	 */
+	.globl		cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
+cheetah_plus_dcpe_trap_vector:
+	membar		#Sync
+	sethi		%hi(do_cheetah_plus_data_parity), %g7
+	jmpl		%g7 + %lo(do_cheetah_plus_data_parity), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+
+do_cheetah_plus_data_parity:
+	/* TL0 D-cache parity error: call the C handler with arg 0. */
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		0x0, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+cheetah_plus_dcpe_trap_vector_tl1:
+	/* TL1 variant: switch to interrupt globals and punt to do_dcpe_tl1. */
+	membar		#Sync
+	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+	sethi		%hi(do_dcpe_tl1), %g3
+	jmpl		%g3 + %lo(do_dcpe_tl1), %g0
+	 nop
+	nop
+	nop
+	nop
+
+	.globl		cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
+cheetah_plus_icpe_trap_vector:
+	membar		#Sync
+	sethi		%hi(do_cheetah_plus_insn_parity), %g7
+	jmpl		%g7 + %lo(do_cheetah_plus_insn_parity), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+
+do_cheetah_plus_insn_parity:
+	/* TL0 I-cache parity error: call the C handler with arg 1. */
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		0x1, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+cheetah_plus_icpe_trap_vector_tl1:
+	/* TL1 variant: switch to interrupt globals and punt to do_icpe_tl1. */
+	membar		#Sync
+	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+	sethi		%hi(do_icpe_tl1), %g3
+	jmpl		%g3 + %lo(do_icpe_tl1), %g0
+	 nop
+	nop
+	nop
+	nop
+
+	/* If we take one of these traps when tl >= 1, then we
+	 * jump to interrupt globals.  If some trap level above us
+	 * was also using interrupt globals, we cannot recover.
+	 * We may use all interrupt global registers except %g6.
+	 */
+	.globl		do_dcpe_tl1, do_icpe_tl1
+	/* TL>0 D-cache parity recovery: first walk every active trap level
+	 * and verify none of them was already running on the interrupt
+	 * globals (TSTATE_IG set); if one was, our register state is gone
+	 * and we take the fatal path.  Otherwise rewrite every D-cache
+	 * tag/data word to clear the bad parity, then rejoin the common
+	 * flush-and-retry code.
+	 */
+do_dcpe_tl1:
+	rdpr		%tl, %g1		! Save original trap level
+	mov		1, %g2			! Setup TSTATE checking loop
+	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
+1:	wrpr		%g2, %tl		! Set trap level to check
+	rdpr		%tstate, %g4		! Read TSTATE for this level
+	andcc		%g4, %g3, %g0		! Interrupt globals in use?
+	bne,a,pn	%xcc, do_dcpe_tl1_fatal	! Yep, irrecoverable
+	 wrpr		%g1, %tl		! Restore original trap level
+	add		%g2, 1, %g2		! Next trap level
+	cmp		%g2, %g1		! Hit them all yet?
+	ble,pt		%icc, 1b		! Not yet
+	 nop
+	wrpr		%g1, %tl		! Restore original trap level
+do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	/* Reset D-cache parity */
+	sethi		%hi(1 << 16), %g1	! D-cache size
+	mov		(1 << 5), %g2		! D-cache line size
+	sub		%g1, %g2, %g1		! Move down 1 cacheline
+1:	srl		%g1, 14, %g3		! Compute UTAG
+	membar		#Sync
+	stxa		%g3, [%g1] ASI_DCACHE_UTAG
+	membar		#Sync
+	sub		%g2, 8, %g3		! 64-bit data word within line
+2:	membar		#Sync
+	stxa		%g0, [%g1 + %g3] ASI_DCACHE_DATA
+	membar		#Sync
+	subcc		%g3, 8, %g3		! Next 64-bit data word
+	bge,pt		%icc, 2b
+	 nop
+	subcc		%g1, %g2, %g1		! Next cacheline
+	bge,pt		%icc, 1b
+	 nop
+	ba,pt		%xcc, dcpe_icpe_tl1_common
+	 nop
+
+	/* Irrecoverable: build a TL1 trap frame and report with code 0x2
+	 * (unrecoverable D-cache parity) to cheetah_plus_parity_error().
+	 */
+do_dcpe_tl1_fatal:
+	sethi		%hi(1f), %g7
+	ba,pt		%xcc, etraptl1
+1:	or		%g7, %lo(1b), %g7
+	mov		0x2, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	/* TL>0 I-cache parity recovery: same interrupt-globals-in-use scan
+	 * as do_dcpe_tl1 above, then invalidate every I-cache tag and fall
+	 * through to the common flush-and-retry code.
+	 */
+do_icpe_tl1:
+	rdpr		%tl, %g1		! Save original trap level
+	mov		1, %g2			! Setup TSTATE checking loop
+	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
+1:	wrpr		%g2, %tl		! Set trap level to check
+	rdpr		%tstate, %g4		! Read TSTATE for this level
+	andcc		%g4, %g3, %g0		! Interrupt globals in use?
+	bne,a,pn	%xcc, do_icpe_tl1_fatal	! Yep, irrecoverable
+	 wrpr		%g1, %tl		! Restore original trap level
+	add		%g2, 1, %g2		! Next trap level
+	cmp		%g2, %g1		! Hit them all yet?
+	ble,pt		%icc, 1b		! Not yet
+	 nop
+	wrpr		%g1, %tl		! Restore original trap level
+do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	/* Flush I-cache */
+	sethi		%hi(1 << 15), %g1	! I-cache size
+	mov		(1 << 5), %g2		! I-cache line size
+	sub		%g1, %g2, %g1
+1:	or		%g1, (2 << 3), %g3	! Select the tag field
+	stxa		%g0, [%g3] ASI_IC_TAG
+	membar		#Sync
+	subcc		%g1, %g2, %g1
+	bge,pt		%icc, 1b
+	 nop
+	ba,pt		%xcc, dcpe_icpe_tl1_common
+	 nop
+
+	/* Irrecoverable: report with code 0x3 (unrecoverable I-cache
+	 * parity) via a TL1 trap frame.
+	 */
+do_icpe_tl1_fatal:
+	sethi		%hi(1f), %g7
+	ba,pt		%xcc, etraptl1
+1:	or		%g7, %lo(1b), %g7
+	mov		0x3, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+	
+	
+dcpe_icpe_tl1_common:
+	/* Flush D-cache, re-enable D/I caches in DCU and finally
+	 * retry the trapping instruction.
+	 */
+	sethi		%hi(1 << 16), %g1	! D-cache size
+	mov		(1 << 5), %g2		! D-cache line size
+	sub		%g1, %g2, %g1
+1:	stxa		%g0, [%g1] ASI_DCACHE_TAG	! Invalidate this line
+	membar		#Sync
+	subcc		%g1, %g2, %g1
+	bge,pt		%icc, 1b
+	 nop
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	or		%g1, (DCU_DC | DCU_IC), %g1	! Turn caches back on
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	retry
+
+	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+	 * in the trap table.  That code has done a memory barrier
+	 * and has disabled both the I-cache and D-cache in the DCU
+	 * control register.  The I-cache is disabled so that we may
+	 * capture the corrupted cache line, and the D-cache is disabled
+	 * because corrupt data may have been placed there and we don't
+	 * want to reference it.
+	 *
+	 * %g1 is one if this trap occurred at %tl >= 1.
+	 *
+	 * Next, we turn off error reporting so that we don't recurse.
+	 */
+	.globl		cheetah_fast_ecc
+cheetah_fast_ecc:
+	/* Mask both correctable and uncorrectable error reporting. */
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4	! %g4/%g5 carry AFSR/AFAR
+	ldxa		[%g0] ASI_AFAR, %g5	! down into the C handler
+	stxa		%g4, [%g0] ASI_AFSR	! Writing AFSR back clears it
+	membar		#Sync
+
+	CHEETAH_LOG_ERROR			! Macro: log state (defined elsewhere)
+
+	/* Raise PIL, build a trap frame, then call
+	 * cheetah_fecc_handler(regs, afsr, afar).  etrap preserves
+	 * %g4/%g5 into %l4/%l5 for us.
+	 */
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_fecc_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	/* Our caller has disabled I-cache and performed membar Sync. */
+	.globl		cheetah_cee
+	/* Correctable ECC error: mask only CEEN (correctable reporting),
+	 * capture and clear AFSR/AFAR, then hand off to
+	 * cheetah_cee_handler(regs, afsr, afar) at PIL 15.
+	 */
+cheetah_cee:
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4
+	ldxa		[%g0] ASI_AFAR, %g5
+	stxa		%g4, [%g0] ASI_AFSR	! Write-back clears the status
+	membar		#Sync
+
+	CHEETAH_LOG_ERROR			! Macro: log state (defined elsewhere)
+
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq		! etrap moves %g4/%g5 to %l4/%l5
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_cee_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
+	.globl		cheetah_deferred_trap
+	/* Deferred error: mask all error reporting, capture/clear
+	 * AFSR/AFAR, and call cheetah_deferred_handler(regs, afsr, afar)
+	 * at PIL 15.  Structure mirrors cheetah_fast_ecc above.
+	 */
+cheetah_deferred_trap:
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4
+	ldxa		[%g0] ASI_AFAR, %g5
+	stxa		%g4, [%g0] ASI_AFSR
+	membar		#Sync
+
+	CHEETAH_LOG_ERROR			! Macro: log state (defined elsewhere)
+
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq		! etrap moves %g4/%g5 to %l4/%l5
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_deferred_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	.globl		__do_privact
+	/* Privileged-action trap: clear the D-MMU fault status, build a
+	 * trap frame, and call do_privact(regs) to deliver the signal.
+	 */
+__do_privact:
+	mov		TLB_SFSR, %g3
+	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7		! %g7 = etrap return point - 4
+	ba,pt		%xcc, etrap
+109:	or		%g7, %lo(109b), %g7
+	call		do_privact
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_mna
+	/* Memory-address-not-aligned trap.  If taken at %tl > 1 it came
+	 * from window spill/fill handling and is routed to winfix_mna;
+	 * otherwise deliver mem_address_unaligned(regs, sfar, sfsr).
+	 */
+do_mna:
+	rdpr		%tl, %g3
+	cmp		%g3, 1			! Condition codes tested below
+
+	/* Setup %g4/%g5 now as they are used in the
+	 * winfixup code.
+	 */
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
+	ldxa		[%g3] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
+	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	bgu,pn		%icc, winfix_mna	! %tl > 1: window fixup path
+	 rdpr		%tpc, %g3
+
+1:	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap		! etrap moves %g4/%g5 to %l4/%l5
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		mem_address_unaligned
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_lddfmna
+	/* Unaligned LDDF (FP doubleword load): read and clear the D-MMU
+	 * fault status/address, then call handle_lddfmna(regs, sfar, sfsr)
+	 * which emulates the access.
+	 */
+do_lddfmna:
+	sethi		%hi(109f), %g7
+	mov		TLB_SFSR, %g4
+	ldxa		[%g4] ASI_DMMU, %g5	! %g5 = fault status (SFSR)
+	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4	! %g4 = fault address (SFAR)
+	ba,pt		%xcc, etrap		! etrap moves %g4/%g5 to %l4/%l5
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		handle_lddfmna
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_stdfmna
+	/* Unaligned STDF (FP doubleword store): identical shape to
+	 * do_lddfmna, dispatching to handle_stdfmna(regs, sfar, sfsr).
+	 */
+do_stdfmna:
+	sethi		%hi(109f), %g7
+	mov		TLB_SFSR, %g4
+	ldxa		[%g4] ASI_DMMU, %g5
+	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		handle_stdfmna
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl	breakpoint_trap
+	/* Breakpoint trap: caller has already built the trap frame;
+	 * just dispatch to sparc_breakpoint(regs) and return.
+	 */
+breakpoint_trap:
+	call		sparc_breakpoint
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 nop
+
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+    defined(CONFIG_SOLARIS_EMUL_MODULE)
+	/* SunOS uses syscall zero as the 'indirect syscall' it looks
+	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
+	 * This is complete brain damage.
+	 */
+	.globl	sunos_indir
+	/* Shift the argument registers down one slot and tail-call the
+	 * real handler from sunos_sys_table; out-of-range numbers go to
+	 * sunos_nosys.  %o7 is restored so the handler returns directly
+	 * to our caller.
+	 */
+sunos_indir:
+	srl		%o0, 0, %o0		! Zero-extend 32-bit syscall number
+	mov		%o7, %l4		! Save return address
+	cmp		%o0, NR_SYSCALLS
+	blu,a,pt	%icc, 1f
+	 sll		%o0, 0x2, %o0		! Table index = num * 4
+	sethi		%hi(sunos_nosys), %l6
+	b,pt		%xcc, 2f
+	 or		%l6, %lo(sunos_nosys), %l6
+1:	sethi		%hi(sunos_sys_table), %l7
+	or		%l7, %lo(sunos_sys_table), %l7
+	lduw		[%l7 + %o0], %l6	! %l6 = handler address
+2:	mov		%o1, %o0		! Shift args down one slot
+	mov		%o2, %o1
+	mov		%o3, %o2
+	mov		%o4, %o3
+	mov		%o5, %o4
+	call		%l6
+	 mov		%l4, %o7		! Tail-call: handler returns to caller
+
+	.globl	sunos_getpid
+	/* SunOS getpid() returns pid in %o0 and ppid in %o1. */
+sunos_getpid:
+	call	sys_getppid
+	 nop
+	call	sys_getpid
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]	! ppid -> second result
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]	! pid -> first result
+
+	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
+	.globl	sunos_getuid
+sunos_getuid:
+	call	sys32_geteuid16
+	 nop
+	call	sys32_getuid16
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+
+	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
+	.globl	sunos_getgid
+sunos_getgid:
+	call	sys32_getegid16
+	 nop
+	call	sys32_getgid16
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+#endif
+
+	/* SunOS's execv() call only specifies the argv argument, the
+	 * environment settings are the same as the calling processes.
+	 */
+	/* NOTE: the .globl directives below were previously swapped across
+	 * the #ifdef CONFIG_COMPAT boundary: ".globl sys_execve" was inside
+	 * the ifdef while its label was outside, leaving sys_execve
+	 * file-local (and unlinkable from the syscall tables) on
+	 * !CONFIG_COMPAT builds.  Each .globl now lives in the same
+	 * preprocessor scope as the label it exports.
+	 */
+	.globl	sys_execve
+sys_execve:
+	sethi		%hi(sparc_execve), %g1
+	ba,pt		%xcc, execve_merge
+	 or		%g1, %lo(sparc_execve), %g1
+#ifdef CONFIG_COMPAT
+	.globl	sunos_execv
+sunos_execv:
+	stx		%g0, [%sp + PTREGS_OFF + PT_V9_I2]	! envp = NULL
+	.globl	sys32_execve
+sys32_execve:
+	sethi		%hi(sparc32_execve), %g1
+	or		%g1, %lo(sparc32_execve), %g1
+#endif
+	/* Common tail: flush register windows and jump to the C helper
+	 * selected in %g1, passing pt_regs in %o0.
+	 */
+execve_merge:
+	flushw
+	jmpl		%g1, %g0
+	 add		%sp, PTREGS_OFF, %o0
+
+	.globl	sys_pipe, sys_sigpause, sys_nis_syscall
+	.globl	sys_sigsuspend, sys_rt_sigsuspend
+	.globl	sys_rt_sigreturn
+	.globl	sys_ptrace
+	.globl	sys_sigaltstack
+	.align	32
+	/* Two-instruction trampolines: hand pt_regs (or the biased frame
+	 * pointer) to the C implementation and branch to it.
+	 */
+sys_pipe:	ba,pt		%xcc, sparc_pipe
+		 add		%sp, PTREGS_OFF, %o0
+sys_nis_syscall:ba,pt		%xcc, c_sys_nis_syscall
+		 add		%sp, PTREGS_OFF, %o0
+sys_memory_ordering:
+		ba,pt		%xcc, sparc_memory_ordering
+		 add		%sp, PTREGS_OFF, %o1	! %o0 already holds the model
+sys_sigaltstack:ba,pt		%xcc, do_sigaltstack
+		 add		%i6, STACK_BIAS, %o2	! %o2 = user stack pointer
+#ifdef CONFIG_COMPAT
+	.globl	sys32_sigstack
+sys32_sigstack:	ba,pt		%xcc, do_sys32_sigstack
+		 mov		%i6, %o2
+	.globl	sys32_sigaltstack
+sys32_sigaltstack:
+		ba,pt		%xcc, do_sys32_sigaltstack
+		 mov		%i6, %o2
+#endif
+		.align		32
+	/* The "add %o7, 1f-.-4, %o7" idiom retargets the call's return
+	 * address to local label 1 below, so these handlers come back
+	 * through the syscall-trace check instead of the call site.
+	 */
+sys_sigsuspend:	add		%sp, PTREGS_OFF, %o0
+		call		do_sigsuspend
+		 add		%o7, 1f-.-4, %o7
+		nop
+sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
+		add		%sp, PTREGS_OFF, %o2
+		call		do_rt_sigsuspend
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_rt_sigsuspend
+sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
+		srl		%o0, 0, %o0
+		add		%sp, PTREGS_OFF, %o2
+		call		do_rt_sigsuspend32
+		 add		%o7, 1f-.-4, %o7
+#endif
+		/* NOTE: %o0 has a correct value already */
+sys_sigpause:	add		%sp, PTREGS_OFF, %o1
+		call		do_sigpause
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_sigreturn
+sys32_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_sigreturn32
+		 add		%o7, 1f-.-4, %o7
+		nop
+#endif
+sys_rt_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_rt_sigreturn
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_rt_sigreturn
+sys32_rt_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_rt_sigreturn32
+		 add		%o7, 1f-.-4, %o7
+		nop
+#endif
+sys_ptrace:	add		%sp, PTREGS_OFF, %o0
+		call		do_ptrace
+		 add		%o7, 1f-.-4, %o7
+		nop
+		.align		32
+	/* Common return point for the handlers above: honor the syscall
+	 * tracer (if enabled) and return to userland via rtrap.
+	 */
+1:		ldx		[%curptr + TI_FLAGS], %l5
+		andcc		%l5, _TIF_SYSCALL_TRACE, %g0
+		be,pt		%icc, rtrap
+		 clr		%l6
+		call		syscall_trace
+		 nop
+
+		ba,pt		%xcc, rtrap
+		 clr		%l6
+
+	/* This is how fork() was meant to be done, 8 instruction entry.
+	 *
+	 * I questioned the following code briefly, let me clear things
+	 * up so you must not reason on it like I did.
+	 *
+	 * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
+	 * need it here because the only piece of window state we copy to
+	 * the child is the CWP register.  Even if the parent sleeps,
+	 * we are safe because we stuck it into pt_regs of the parent
+	 * so it will not change.
+	 *
+	 * XXX This raises the question, whether we can do the same on
+	 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
+	 * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
+	 * XXX fork_kwim in UREG_G1 (global registers are considered
+	 * XXX volatile across a system call in the sparc ABI I think
+	 * XXX if it isn't we can use regs->y instead, anyone who depends
+	 * XXX upon the Y register being preserved across a fork deserves
+	 * XXX to lose).
+	 *
+	 * In fact we should take advantage of that fact for other things
+	 * during system calls...
+	 */
+	.globl	sys_fork, sys_vfork, sys_clone, sparc_exit
+	.globl	ret_from_syscall
+	.align	32
+sys_vfork:	/* Under Linux, vfork and fork are just special cases of clone. */
+		sethi		%hi(0x4000 | 0x0100 | SIGCHLD), %o0	! CLONE_VFORK|CLONE_VM
+		or		%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
+		ba,pt		%xcc, sys_clone
+sys_fork:	 clr		%o1			! Delay slot shared with vfork path
+		mov		SIGCHLD, %o0
+sys_clone:	flushw
+		movrz		%o1, %fp, %o1		! Child stack defaults to parent's
+		mov		0, %o3
+		ba,pt		%xcc, sparc_do_fork
+		 add		%sp, PTREGS_OFF, %o2
+ret_from_syscall:
+		/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
+		 * %o7 for us.  Check performance counter stuff too.
+		 */
+		andn		%o7, _TIF_NEWCHILD, %l0
+		stx		%l0, [%g6 + TI_FLAGS]
+		call		schedule_tail
+		 mov		%g7, %o0
+		andcc		%l0, _TIF_PERFCTR, %g0
+		be,pt		%icc, 1f
+		 nop
+		ldx		[%g6 + TI_PCR], %o7
+		wr		%g0, %o7, %pcr
+
+		/* Blackbird errata workaround.  See commentary in
+		 * smp.c:smp_percpu_timer_interrupt() for more
+		 * information.
+		 */
+		ba,pt		%xcc, 99f
+		 nop
+		.align		64
+99:		wr		%g0, %g0, %pic
+		rd		%pic, %g0
+
+1:		b,pt		%xcc, ret_sys_call
+		 ldx		[%sp + PTREGS_OFF + PT_V9_I0], %o0
+	/* sparc_exit: fold any windows belonging to other address spaces
+	 * back into the save area before exiting, with interrupts off.
+	 */
+sparc_exit:	wrpr		%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
+		rdpr		%otherwin, %g1
+		rdpr		%cansave, %g3
+		add		%g3, %g1, %g3		! Reclaim otherwin windows
+		wrpr		%g3, 0x0, %cansave
+		wrpr		%g0, 0x0, %otherwin
+		wrpr		%g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
+		ba,pt		%xcc, sys_exit
+		 stb		%g0, [%g6 + TI_WSAVED]
+
+	/* Out-of-range syscall number: route through sys_ni_syscall via
+	 * the common dispatch at label 4 in linux_sparc_syscall.
+	 */
+linux_sparc_ni_syscall:
+	sethi		%hi(sys_ni_syscall), %l7
+	b,pt		%xcc, 4f
+	 or		%l7, %lo(sys_ni_syscall), %l7
+
+	/* Traced 32-bit syscall: notify the tracer, then reload the
+	 * (possibly tracer-modified) arguments zero-extended from the
+	 * saved %i registers and resume at the call in linux_sparc_syscall.
+	 */
+linux_syscall_trace32:
+	call		syscall_trace
+	 nop
+	srl		%i0, 0, %o0
+	mov		%i4, %o4
+	srl		%i1, 0, %o1
+	srl		%i2, 0, %o2
+	b,pt		%xcc, 2f
+	 srl		%i3, 0, %o3
+
+	/* Traced 64-bit syscall: same as above without zero-extension. */
+linux_syscall_trace:
+	call		syscall_trace
+	 nop
+	mov		%i0, %o0
+	mov		%i1, %o1
+	mov		%i2, %o2
+	mov		%i3, %o3
+	b,pt		%xcc, 2f
+	 mov		%i4, %o4
+
+
+	/* Linux 32-bit and SunOS system calls enter here... */
+	.align	32
+	.globl	linux_sparc_syscall32
+	/* 32-bit entry: %g1 = syscall number, %l7 = syscall table base.
+	 * Arguments are zero-extended from the 32-bit user values.  The
+	 * trailing "! IEU0" style comments are pipeline-slotting notes.
+	 */
+linux_sparc_syscall32:
+	/* Direct access to user regs, much faster. */
+	cmp		%g1, NR_SYSCALLS			! IEU1	Group
+	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
+	 srl		%i0, 0, %o0				! IEU0
+	sll		%g1, 2, %l4				! IEU0	Group
+#ifdef SYSCALL_TRACING
+	call		syscall_trace_entry
+	 add		%sp, PTREGS_OFF, %o0
+	srl		%i0, 0, %o0
+#endif
+	srl		%i4, 0, %o4				! IEU1
+	lduw		[%l7 + %l4], %l7			! Load
+	srl		%i1, 0, %o1				! IEU0	Group
+	ldx		[%curptr + TI_FLAGS], %l0		! Load
+
+	srl		%i5, 0, %o5				! IEU1
+	srl		%i2, 0, %o2				! IEU0	Group
+	andcc		%l0, _TIF_SYSCALL_TRACE, %g0		! IEU0	Group
+	bne,pn		%icc, linux_syscall_trace32		! CTI
+	 mov		%i0, %l5				! IEU1
+	call		%l7					! CTI	Group brk forced
+	 srl		%i3, 0, %o3				! IEU0
+	ba,a,pt		%xcc, 3f
+
+	/* Linux native and SunOS system calls enter here... */
+	.align	32
+	.globl	linux_sparc_syscall, ret_sys_call
+	/* 64-bit entry: identical structure without zero-extension.
+	 * Label 4 is the re-entry point used by linux_sparc_ni_syscall,
+	 * label 2 the re-entry point after syscall tracing.
+	 */
+linux_sparc_syscall:
+	/* Direct access to user regs, much faster. */
+	cmp		%g1, NR_SYSCALLS			! IEU1	Group
+	bgeu,pn		%xcc, linux_sparc_ni_syscall		! CTI
+	 mov		%i0, %o0				! IEU0
+	sll		%g1, 2, %l4				! IEU0	Group
+#ifdef SYSCALL_TRACING
+	call		syscall_trace_entry
+	 add		%sp, PTREGS_OFF, %o0
+	mov		%i0, %o0
+#endif
+	mov		%i1, %o1				! IEU1
+	lduw		[%l7 + %l4], %l7			! Load
+4:	mov		%i2, %o2				! IEU0	Group
+	ldx		[%curptr + TI_FLAGS], %l0		! Load
+
+	mov		%i3, %o3				! IEU1
+	mov		%i4, %o4				! IEU0	Group
+	andcc		%l0, _TIF_SYSCALL_TRACE, %g0		! IEU1	Group+1 bubble
+	bne,pn		%icc, linux_syscall_trace		! CTI	Group
+	 mov		%i0, %l5				! IEU0
+2:	call		%l7					! CTI	Group brk forced
+	 mov		%i5, %o5				! IEU0
+	nop
+
+3:	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+	/* Syscall return: translate the C return value into the SPARC
+	 * syscall ABI (carry clear = success with result in %o0, carry
+	 * set = failure with positive errno), honoring
+	 * force_successful_syscall_return() and syscall tracing.
+	 */
+ret_sys_call:
+#ifdef SYSCALL_TRACING
+	mov		%o0, %o1
+	call		syscall_trace_exit
+	 add		%sp, PTREGS_OFF, %o0
+	mov		%o1, %o0
+#endif
+	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+	ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+	sra		%o0, 0, %o0
+	mov		%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+	sllx		%g2, 32, %g2		! Carry bits live in tstate's upper half
+
+	/* Check if force_successful_syscall_return()
+	 * was invoked.
+	 */
+	ldx		[%curptr + TI_FLAGS], %l0
+	andcc		%l0, _TIF_SYSCALL_SUCCESS, %g0
+	be,pt		%icc, 1f
+	 andn		%l0, _TIF_SYSCALL_SUCCESS, %l0	! One-shot flag: clear it
+	ba,pt		%xcc, 80f
+	 stx		%l0, [%curptr + TI_FLAGS]
+
+1:
+	/* Values in [-ERESTART_RESTARTBLOCK, -1] are errnos. */
+	cmp		%o0, -ERESTART_RESTARTBLOCK
+	bgeu,pn		%xcc, 1f
+	 andcc		%l0, _TIF_SYSCALL_TRACE, %l6	
+80:
+	/* System call success, clear Carry condition code. */
+	andn		%g3, %g2, %g3
+	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]	
+	bne,pn		%icc, linux_syscall_trace2
+	 add		%l1, 0x4, %l2			! npc = npc+4
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+	ba,pt		%xcc, rtrap_clr_l6
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+1:
+	/* System call failure, set Carry condition code.
+	 * Also, get abs(errno) to return to the process.
+	 */
+	andcc		%l0, _TIF_SYSCALL_TRACE, %l6	
+	sub		%g0, %o0, %o0			! Negate to positive errno
+	or		%g3, %g2, %g3
+	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+	mov		1, %l6
+	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+	bne,pn		%icc, linux_syscall_trace2
+	 add		%l1, 0x4, %l2			! npc = npc+4
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+
+	b,pt		%xcc, rtrap
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+	/* Traced-syscall exit: notify the tracer, then advance the PCs. */
+linux_syscall_trace2:
+	call		syscall_trace
+	 nop
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+	ba,pt		%xcc, rtrap
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+	.align		32
+	.globl		__flushw_user
+	/* Flush all user register windows to the stack: do a save for
+	 * every window in %otherwin (counting them in %g2), then the
+	 * matching number of restores to get back to our own window.
+	 */
+__flushw_user:
+	rdpr		%otherwin, %g1
+	brz,pn		%g1, 2f			! No user windows: done
+	 clr		%g2
+1:	save		%sp, -128, %sp
+	rdpr		%otherwin, %g1
+	brnz,pt		%g1, 1b
+	 add		%g2, 1, %g2		! Count saves performed
+1:	sub		%g2, 1, %g2
+	brnz,pt		%g2, 1b
+	 restore	%g0, %g0, %g0		! Unwind the saves
+2:	retl
+	 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
new file mode 100644
index 0000000..50d2af1
--- /dev/null
+++ b/arch/sparc64/kernel/etrap.S
@@ -0,0 +1,301 @@
+/* $Id: etrap.S,v 1.46 2002/02/09 19:49:30 davem Exp $
+ * etrap.S: Preparing for entry into the kernel on Sparc V9.
+ *
+ * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+
+#define		TASK_REGOFF		(THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
+#define		ETRAP_PSTATE1		(PSTATE_RMO | PSTATE_PRIV)
+#define		ETRAP_PSTATE2		\
+		(PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
+
+/*
+ * On entry, %g7 is return address - 0x4.
+ * %g4 and %g5 will be preserved in %l4 and %l5 respectively.
+ */
+
+		.text		
+		.align	64
+		.globl	etrap, etrap_irq, etraptl1
+	/* Trap-entry: build a pt_regs frame on the kernel stack and jump
+	 * back to %g7 + 4 in kernel mode (PIL folded into saved tstate via
+	 * etrap_irq).  %g4/%g5 are carried through into %l4/%l5 for the
+	 * caller.  Branch "1f" below is also the re-entry point from
+	 * etraptl1 once the extra TL state has been saved.
+	 */
+etrap:		rdpr	%pil, %g2
+etrap_irq:
+		rdpr	%tstate, %g1
+		sllx	%g2, 20, %g3		! Fold PIL into saved tstate
+		andcc	%g1, TSTATE_PRIV, %g0	! Trap from kernel or user?
+		or	%g1, %g3, %g1
+		bne,pn	%xcc, 1f		! Kernel: reuse current stack
+		 sub	%sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
+		wrpr	%g0, 7, %cleanwin
+
+		/* From user: switch to the thread's kernel stack region and
+		 * lazily disable the FPU unless the user had it enabled.
+		 */
+		sethi	%hi(TASK_REGOFF), %g2
+		sethi	%hi(TSTATE_PEF), %g3
+		or	%g2, %lo(TASK_REGOFF), %g2
+		and	%g1, %g3, %g3
+		brnz,pn	%g3, 1f
+		 add	%g6, %g2, %g2
+		wr	%g0, 0, %fprs
+1:		rdpr	%tpc, %g3
+
+		/* Save trap PC/NPC/Y and tstate into the new frame. */
+		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
+		rdpr	%tnpc, %g1
+		stx	%g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
+		rd	%y, %g3
+		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
+		st	%g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
+		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
+		mov	%g6, %l6
+
+		bne,pn	%xcc, 3f		! Kernel-origin: FP-depth path
+		 mov	PRIMARY_CONTEXT, %l4
+		/* User-origin: park the user's windows in %otherwin and
+		 * install the kernel MMU context (the two cplus_etrap_insn
+		 * slots are patched at boot for Cheetah+ context encoding).
+		 */
+		rdpr	%canrestore, %g3
+		rdpr	%wstate, %g2
+		wrpr	%g0, 0, %canrestore
+		sll	%g2, 3, %g2
+		mov	1, %l5
+		stb	%l5, [%l6 + TI_FPDEPTH]
+
+		wrpr	%g3, 0, %otherwin
+		wrpr	%g2, 0, %wstate
+cplus_etrap_insn_1:
+		sethi	%hi(0), %g3		! Patched: context high bits
+		sllx	%g3, 32, %g3
+cplus_etrap_insn_2:
+		sethi	%hi(0), %g2		! Patched: context low bits
+		or	%g3, %g2, %g3
+		stxa	%g3, [%l4] ASI_DMMU
+		flush	%l6
+		wr	%g0, ASI_AIUS, %asi
+2:		wrpr	%g0, 0x0, %tl
+		mov	%g4, %l4		! Preserve %g4/%g5 for caller
+		mov	%g5, %l5
+
+		/* Dump all globals and in-registers into pt_regs, then
+		 * return to the trap handler at %g7 + 4 with %g4 = task
+		 * and (SMP) %g5 = per-cpu TSB base.
+		 */
+		mov	%g7, %l2
+		wrpr	%g0, ETRAP_PSTATE1, %pstate
+		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
+		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
+		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
+		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
+		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
+		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
+
+		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
+		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
+		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
+		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
+		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
+		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]
+		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]
+
+		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
+		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
+		wrpr	%g0, ETRAP_PSTATE2, %pstate
+		mov	%l6, %g6
+#ifdef CONFIG_SMP
+		mov	TSB_REG, %g3
+		ldxa	[%g3] ASI_IMMU, %g5
+#endif
+		jmpl	%l2 + 0x4, %g0
+		 ldx	[%g6 + TI_TASK], %g4
+
+		/* Kernel-origin trap: bump the FP-save depth and mark the
+		 * new level's FP state invalid, then rejoin at 2 above.
+		 */
+3:		ldub	[%l6 + TI_FPDEPTH], %l5
+		add	%l6, TI_FPSAVED + 1, %l4
+		srl	%l5, 1, %l3
+		add	%l5, 2, %l5
+		stb	%l5, [%l6 + TI_FPDEPTH]
+		ba,pt	%xcc, 2b
+		 stb	%g0, [%l4 + %l3]
+		nop
+
+etraptl1:	/* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
+		 * We place this right after pt_regs on the trap stack.
+		 * The layout is (one 0x20-byte record per trap level):
+		 *	0x00	TL1's TSTATE
+		 *	0x08	TL1's TPC
+		 *	0x10	TL1's TNPC
+		 *	0x18	TL1's TT
+		 *	 ...	(TL2 at 0x20, TL3 at 0x40, TL4 at 0x60)
+		 *	0x78	TL4's TT
+		 *	0x80	TL
+		 */
+		sub	%sp, ((4 * 8) * 4) + 8, %g2	! 4 levels * 4 regs + TL word
+		rdpr	%tl, %g1
+
+		wrpr	%g0, 1, %tl
+		rdpr	%tstate, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x00]
+		rdpr	%tpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x08]
+		rdpr	%tnpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x10]
+		rdpr	%tt, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x18]
+
+		wrpr	%g0, 2, %tl
+		rdpr	%tstate, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x20]
+		rdpr	%tpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x28]
+		rdpr	%tnpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x30]
+		rdpr	%tt, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x38]
+
+		wrpr	%g0, 3, %tl
+		rdpr	%tstate, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x40]
+		rdpr	%tpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x48]
+		rdpr	%tnpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x50]
+		rdpr	%tt, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x58]
+
+		wrpr	%g0, 4, %tl
+		rdpr	%tstate, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x60]
+		rdpr	%tpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x68]
+		rdpr	%tnpc, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x70]
+		rdpr	%tt, %g3
+		stx	%g3, [%g2 + STACK_BIAS + 0x78]
+
+		wrpr	%g1, %tl		! Restore the original trap level
+		stx	%g1, [%g2 + STACK_BIAS + 0x80]
+
+		/* Rejoin etrap's main path just past its frame setup. */
+		rdpr	%tstate, %g1
+		sub	%g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
+		ba,pt	%xcc, 1b
+		 andcc	%g1, TSTATE_PRIV, %g0
+
+		.align	64
+		.globl	scetrap
+	/* System-call trap entry: a slimmed-down etrap that builds pt_regs
+	 * and returns via "done" to %g7 + 4, writing the return tstate
+	 * directly instead of round-tripping through the trap handler.
+	 */
+scetrap:	rdpr	%pil, %g2
+		rdpr	%tstate, %g1
+		sllx	%g2, 20, %g3		! Fold PIL into saved tstate
+		andcc	%g1, TSTATE_PRIV, %g0	! From kernel or user?
+		or	%g1, %g3, %g1
+		bne,pn	%xcc, 1f
+		 sub	%sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
+		wrpr	%g0, 7, %cleanwin
+
+		/* Bit 12 of tstate is TSTATE_PEF: shift it to the sign bit
+		 * so brlz skips the FPU disable when the user had FP on.
+		 */
+		sllx	%g1, 51, %g3
+		sethi	%hi(TASK_REGOFF), %g2
+		or	%g2, %lo(TASK_REGOFF), %g2
+		brlz,pn	%g3, 1f
+		 add	%g6, %g2, %g2
+		wr	%g0, 0, %fprs
+1:		rdpr	%tpc, %g3
+		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
+
+		rdpr	%tnpc, %g1
+		stx	%g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
+		stx	%g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
+		save	%g2, -STACK_BIAS, %sp	! Ordering here is critical
+		mov	%g6, %l6
+		bne,pn	%xcc, 2f		! Kernel-origin: skip ctx switch
+		 mov	ASI_P, %l7
+		rdpr	%canrestore, %g3
+
+		/* User-origin: park user windows in %otherwin and install
+		 * the kernel MMU context (cplus_etrap_insn_3/4 are patched
+		 * at boot for Cheetah+ context encoding).
+		 */
+		rdpr	%wstate, %g2
+		wrpr	%g0, 0, %canrestore
+		sll	%g2, 3, %g2
+		mov	PRIMARY_CONTEXT, %l4
+		wrpr	%g3, 0, %otherwin
+		wrpr	%g2, 0, %wstate
+cplus_etrap_insn_3:
+		sethi	%hi(0), %g3		! Patched: context high bits
+		sllx	%g3, 32, %g3
+cplus_etrap_insn_4:
+		sethi	%hi(0), %g2		! Patched: context low bits
+		or	%g3, %g2, %g3
+		stxa	%g3, [%l4] ASI_DMMU
+		flush	%l6
+
+		mov	ASI_AIUS, %l7
+2:		mov	%g4, %l4		! Preserve %g4/%g5 for caller
+		mov	%g5, %l5
+		add	%g7, 0x4, %l2		! Return address for "done"
+		wrpr	%g0, ETRAP_PSTATE1, %pstate
+		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
+		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
+		sllx	%l7, 24, %l7		! ASI field of the new tstate
+
+		/* Assemble the return tstate: chosen ASI, current CWP,
+		 * RMO+PEF, and (via wrpr immediate) PRIV+IE.
+		 */
+		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
+		rdpr	%cwp, %l0
+		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
+		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
+		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
+		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
+		or	%l7, %l0, %l7
+		sethi	%hi(TSTATE_RMO | TSTATE_PEF), %l0
+
+		or	%l7, %l0, %l7
+		wrpr	%l2, %tnpc
+		wrpr	%l7, (TSTATE_PRIV | TSTATE_IE), %tstate
+		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
+		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
+		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
+		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
+		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]
+
+		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]
+		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
+		mov	%l6, %g6
+		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
+#ifdef CONFIG_SMP
+		mov	TSB_REG, %g3
+		ldxa	[%g3] ASI_IMMU, %g5
+#endif
+		ldx	[%g6 + TI_TASK], %g4
+		done
+
+#undef TASK_REGOFF
+#undef ETRAP_PSTATE1
+
+	/* Template instructions holding the Cheetah+ nucleus/context-zero
+	 * encodings; their machine words are copied over the four
+	 * cplus_etrap_insn_* slots above at boot.
+	 */
+cplus_einsn_1:
+		sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %g3
+cplus_einsn_2:
+		sethi			%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+		.globl			cheetah_plus_patch_etrap
+cheetah_plus_patch_etrap:
+		/* We configure the dTLB512_0 for 4MB pages and the
+		 * dTLB512_1 for 8K pages when in context zero.
+		 */
+		sethi			%hi(cplus_einsn_1), %o0
+		sethi			%hi(cplus_etrap_insn_1), %o2
+		lduw			[%o0 + %lo(cplus_einsn_1)], %o1
+		or			%o2, %lo(cplus_etrap_insn_1), %o2
+		stw			%o1, [%o2]	! Patch etrap slot 1
+		flush			%o2		! Keep I-cache coherent
+		sethi			%hi(cplus_etrap_insn_3), %o2
+		or			%o2, %lo(cplus_etrap_insn_3), %o2
+		stw			%o1, [%o2]	! Patch scetrap slot 1
+		flush			%o2
+
+		sethi			%hi(cplus_einsn_2), %o0
+		sethi			%hi(cplus_etrap_insn_2), %o2
+		lduw			[%o0 + %lo(cplus_einsn_2)], %o1
+		or			%o2, %lo(cplus_etrap_insn_2), %o2
+		stw			%o1, [%o2]	! Patch etrap slot 2
+		flush			%o2
+		sethi			%hi(cplus_etrap_insn_4), %o2
+		or			%o2, %lo(cplus_etrap_insn_4), %o2
+		stw			%o1, [%o2]	! Patch scetrap slot 2
+		flush			%o2
+
+		retl
+		 nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 0000000..8104a56
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,782 @@
+/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
+ * head.S: Initial boot code for the Sparc64 port of Linux.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <asm/thread_info.h>
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/processor.h>
+#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
+#include <asm/head.h>
+#include <asm/ttable.h>
+#include <asm/mmu.h>
+	
+/* This section from from _start to sparc64_boot_end should fit into
+ * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
+ * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
+ * 0x0000.0000.0040.6000 and empty_bad_page, which is from
+ * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000. 
+ */
+
+	/* Kernel entry point.  All the entry aliases resolve to the same
+	 * address; the space doubles as the boot-time user stack.
+	 */
+	.text
+	.globl	start, _start, stext, _stext
+_start:
+start:
+_stext:
+stext:
+bootup_user_stack:
+! 0x0000000000404000
+	b	sparc64_boot
+	 flushw					/* Flush register file.      */
+
+/* This stuff has to be in sync with SILO and other potential boot loaders
+ * Fields should be kept upward compatible and whenever any change is made,
+ * HdrS version should be incremented.
+ */
+        .global root_flags, ram_flags, root_dev
+        .global sparc_ramdisk_image, sparc_ramdisk_size
+	.global sparc_ramdisk_image64
+
+/* "HdrS" magic + kernel version let the loader recognize this image. */
+        .ascii  "HdrS"
+        .word   LINUX_VERSION_CODE
+
+	/* History:
+	 *
+	 * 0x0300 : Supports being located at other than 0x4000
+	 * 0x0202 : Supports kernel params string
+	 * 0x0201 : Supports reboot_command
+	 */
+	.half   0x0301          /* HdrS version */
+
+/* The following field offsets are ABI with the boot loader; the loader
+ * patches these slots in place before jumping to the kernel.
+ */
+root_flags:
+        .half   1
+root_dev:
+        .half   0
+ram_flags:
+        .half   0
+sparc_ramdisk_image:
+        .word   0
+sparc_ramdisk_size:
+        .word   0
+        .xword  reboot_command
+	.xword	bootstr_info
+sparc_ramdisk_image64:
+	.xword	0
+	.word	_end
+
+	/* We must be careful, 32-bit OpenBOOT will get confused if it
+	 * tries to save away a register window to a 64-bit kernel
+	 * stack address.  Flush all windows, disable interrupts,
+	 * remap if necessary, jump onto kernel trap table, then kernel
+	 * stack, or else we die.
+	 *
+	 * PROM entry point is on %o4
+	 */
+sparc64_boot:
+	/* Dispatch on CPU type: Cheetah, Cheetah+ (or later), or Spitfire. */
+	BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
+	ba,pt	%xcc, spitfire_boot
+	 nop
+
+cheetah_plus_boot:
+	/* Preserve OBP chosen DCU and DCR register settings.  */
+	ba,pt	%xcc, cheetah_generic_boot
+	 nop
+
+cheetah_boot:
+	/* Plain Cheetah: program the dispatch control and data cache
+	 * unit registers ourselves instead of trusting OBP's values.
+	 */
+	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
+	wr	%g1, %asr18
+
+	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
+	or	%g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
+	sllx	%g7, 32, %g7
+	or	%g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
+	stxa	%g7, [%g0] ASI_DCU_CONTROL_REG
+	membar	#Sync
+
+cheetah_generic_boot:
+	/* Clear all TSB extension registers (primary/secondary/nucleus). */
+	mov	TSB_EXTENSION_P, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	stxa	%g0, [%g3] ASI_IMMU
+	membar	#Sync
+
+	mov	TSB_EXTENSION_S, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	membar	#Sync
+
+	mov	TSB_EXTENSION_N, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	stxa	%g0, [%g3] ASI_IMMU
+	membar	#Sync
+
+	wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
+	wr	%g0, 0, %fprs
+
+	/* Just like for Spitfire, we probe itlb-2 for a mapping which
+	 * matches our current %pc.  We take the physical address in
+	 * that mapping and use it to make our own.
+	 */
+
+	/* %g5 holds the tlb data */
+        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+        sllx    %g5, 32, %g5
+        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+	/* Put PADDR tlb data mask into %g3. */
+	sethi	%uhi(_PAGE_PADDR), %g3
+	or	%g3, %ulo(_PAGE_PADDR), %g3
+	sllx	%g3, 32, %g3
+	sethi	%hi(_PAGE_PADDR), %g7
+	or	%g7, %lo(_PAGE_PADDR), %g7
+	or	%g3, %g7, %g3
+
+	set	2 << 16, %l0		/* TLB entry walker. */
+	set	0x1fff, %l2		/* Page mask. */
+	rd	%pc, %l3
+	andn	%l3, %l2, %g2		/* vaddr comparator */
+
+	/* Walk the 128-entry large ITLB looking for the entry mapping %pc. */
+1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
+	membar	#Sync
+	andn	%g1, %l2, %g1
+	cmp	%g1, %g2
+	be,pn	%xcc, cheetah_got_tlbentry
+	 nop
+	and	%l0, (127 << 3), %g1
+	cmp	%g1, (127 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	/* Search the small TLB.  OBP never maps us like that but
+	 * newer SILO can.
+	 */
+	clr	%l0
+
+1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
+	membar	#Sync
+	andn	%g1, %l2, %g1
+	cmp	%g1, %g2
+	be,pn	%xcc, cheetah_got_tlbentry
+	 nop
+	cmp	%l0, (15 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	/* BUG() if we get here... */
+	ta	0x5
+
+cheetah_got_tlbentry:
+	/* Double read of the TLB data: first load is discarded into %g0
+	 * (hardware quirk workaround, mirrors the nops on Spitfire below).
+	 */
+	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g0
+	ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
+	membar	#Sync
+	/* Keep only the physical-address bits, drop the in-page offset,
+	 * and merge the paddr into the TTE being built in %g5.
+	 */
+	and	%g1, %g3, %g1
+	set	0x5fff, %l0
+	andn	%g1, %l0, %g1
+	or	%g5, %g1, %g5
+
+	/* Clear out any KERNBASE area entries. */
+	set	2 << 16, %l0
+	sethi	%hi(KERNBASE), %g3
+	sethi	%hi(KERNBASE<<1), %g7
+	mov	TLB_TAG_ACCESS, %l7
+
+	/* First, check ITLB */
+1:	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
+	membar	#Sync
+	andn	%g1, %l2, %g1
+	cmp	%g1, %g3
+	blu,pn	%xcc, 2f
+	 cmp	%g1, %g7
+	bgeu,pn	%xcc, 2f
+	 nop
+	/* Entry falls inside [KERNBASE, KERNBASE*2): demap it. */
+	stxa	%g0, [%l7] ASI_IMMU
+	membar	#Sync
+	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+
+2:	and	%l0, (127 << 3), %g1
+	cmp	%g1, (127 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	/* Next, check DTLB */
+	set	2 << 16, %l0
+1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
+	membar	#Sync
+	andn	%g1, %l2, %g1
+	cmp	%g1, %g3
+	blu,pn	%xcc, 2f
+	 cmp	%g1, %g7
+	bgeu,pn	%xcc, 2f
+	 nop
+	stxa	%g0, [%l7] ASI_DMMU
+	membar	#Sync
+	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+	
+2:	and	%l0, (511 << 3), %g1
+	cmp	%g1, (511 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	/* On Cheetah+, have to check second DTLB.  */
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
+	ba,pt	%xcc, 9f
+	 nop
+
+2:	set	3 << 16, %l0
+1:	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
+	membar	#Sync
+	andn	%g1, %l2, %g1
+	cmp	%g1, %g3
+	blu,pn	%xcc, 2f
+	 cmp	%g1, %g7
+	bgeu,pn	%xcc, 2f
+	 nop
+	stxa	%g0, [%l7] ASI_DMMU
+	membar	#Sync
+	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+	
+2:	and	%l0, (511 << 3), %g1
+	cmp	%g1, (511 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+9:
+
+	/* Now lock the TTE we created into ITLB-0 and DTLB-0,
+	 * entry 15 (and maybe 14 too).
+	 */
+	sethi	%hi(KERNBASE), %g3
+	set	(0 << 16) | (15 << 3), %g7
+	stxa	%g3, [%l7] ASI_DMMU
+	membar	#Sync
+	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+	stxa	%g3, [%l7] ASI_IMMU
+	membar	#Sync
+	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+	flush	%g3
+	membar	#Sync
+	sethi	%hi(_end), %g3			/* Check for bigkernel case */
+	or	%g3, %lo(_end), %g3
+	srl	%g3, 23, %g3			/* Check if _end > 8M */
+	brz,pt	%g3, 1f
+	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
+	/* Big kernel: add a second locked 4MB mapping at KERNBASE+4MB. */
+	sethi	%hi(0x400000), %g3
+	or	%g3, %lo(0x400000), %g3
+	add	%g5, %g3, %g5			/* New tte data */
+	andn	%g5, (_PAGE_G), %g5
+	sethi	%hi(KERNBASE+0x400000), %g3
+	or	%g3, %lo(KERNBASE+0x400000), %g3
+	set	(0 << 16) | (14 << 3), %g7
+	stxa	%g3, [%l7] ASI_DMMU
+	membar	#Sync
+	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+	stxa	%g3, [%l7] ASI_IMMU
+	membar	#Sync
+	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+	flush	%g3
+	membar	#Sync
+	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
+	ba,pt	%xcc, 1f
+	 nop
+
+1:	set	sun4u_init, %g2
+	jmpl    %g2 + %g0, %g0
+	 nop
+
+spitfire_boot:
+	/* Typically PROM has already enabled both MMU's and both on-chip
+	 * caches, but we do it here anyway just to be paranoid.
+	 */
+	mov	(LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
+	stxa	%g1, [%g0] ASI_LSU_CONTROL
+	membar	#Sync
+
+	/*
+	 * Make sure we are in privileged mode, have address masking,
+         * using the ordinary globals and have enabled floating
+         * point.
+	 *
+	 * Again, typically PROM has left %pil at 13 or similar, and
+	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
+         */
+	wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
+	wr	%g0, 0, %fprs
+
+spitfire_create_mappings:
+	/* %g5 holds the tlb data */
+        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+        sllx    %g5, 32, %g5
+        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+	/* Base of physical memory cannot reliably be assumed to be
+	 * at 0x0!  Figure out where it happens to be. -DaveM
+	 */
+
+	/* Put PADDR tlb data mask into %g3. */
+	sethi	%uhi(_PAGE_PADDR_SF), %g3
+	or	%g3, %ulo(_PAGE_PADDR_SF), %g3
+	sllx	%g3, 32, %g3
+	sethi	%hi(_PAGE_PADDR_SF), %g7
+	or	%g7, %lo(_PAGE_PADDR_SF), %g7
+	or	%g3, %g7, %g3
+
+	/* Walk through entire ITLB, looking for entry which maps
+	 * our %pc currently, stick PADDR from there into %g5 tlb data.
+	 */
+	clr	%l0			/* TLB entry walker. */
+	set	0x1fff, %l2		/* Page mask. */
+	rd	%pc, %l3
+	andn	%l3, %l2, %g2		/* vaddr comparator */
+1:
+	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
+	nop
+	nop
+	nop
+	andn	%g1, %l2, %g1		/* Get vaddr */
+	cmp	%g1, %g2
+	be,a,pn	%xcc, spitfire_got_tlbentry
+	 ldxa	[%l0] ASI_ITLB_DATA_ACCESS, %g1
+	cmp	%l0, (63 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	/* BUG() if we get here... */
+	ta	0x5
+
+spitfire_got_tlbentry:
+	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
+	nop
+	nop
+	nop
+	and	%g1, %g3, %g1		/* Mask to just get paddr bits.       */
+	set	0x5fff, %l3		/* Mask offset to get phys base.      */
+	andn	%g1, %l3, %g1
+
+	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
+	 * NOTE: the PROM cif code into the TLB.
+	 */
+
+	or	%g5, %g1, %g5		/* Or it into TAG being built.        */
+
+	/* Demap any stale ITLB entries inside [KERNBASE, KERNBASE*2). */
+	clr	%l0			/* TLB entry walker. */
+	sethi	%hi(KERNBASE), %g3	/* 4M lower limit */
+	sethi	%hi(KERNBASE<<1), %g7	/* 8M upper limit */
+	mov	TLB_TAG_ACCESS, %l7
+1:
+	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
+	nop
+	nop
+	nop
+	andn	%g1, %l2, %g1		/* Get vaddr */
+	cmp	%g1, %g3
+	blu,pn	%xcc, 2f
+	 cmp	%g1, %g7
+	bgeu,pn	%xcc, 2f
+	 nop
+	stxa	%g0, [%l7] ASI_IMMU
+	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+2:
+	cmp	%l0, (63 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	nop; nop; nop
+
+	/* Same sweep over the DTLB. */
+	clr	%l0			/* TLB entry walker. */
+1:
+	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
+	nop
+	nop
+	nop
+	andn	%g1, %l2, %g1		/* Get vaddr */
+	cmp	%g1, %g3
+	blu,pn	%xcc, 2f
+	 cmp	%g1, %g7
+	bgeu,pn	%xcc, 2f
+	 nop
+	stxa	%g0, [%l7] ASI_DMMU
+	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+2:
+	cmp	%l0, (63 << 3)
+	blu,pt	%xcc, 1b
+	 add	%l0, (1 << 3), %l0
+
+	nop; nop; nop
+
+
+	/* PROM never puts any TLB entries into the MMU with the lock bit
+	 * set.  So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
+	 */
+
+	sethi	%hi(KERNBASE), %g3
+	mov	(63 << 3), %g7
+	stxa	%g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG	*/
+	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA		*/
+	membar	#Sync
+	stxa	%g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG	*/
+	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA		*/
+	membar	#Sync
+	flush	%g3
+	membar	#Sync
+	sethi	%hi(_end), %g3			/* Check for bigkernel case */
+	or	%g3, %lo(_end), %g3
+	srl	%g3, 23, %g3			/* Check if _end > 8M */
+	brz,pt	%g3, 2f
+	 sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
+	/* Big kernel: second locked 4MB mapping at KERNBASE+4MB, entry 62. */
+	sethi	%hi(0x400000), %g3
+	or	%g3, %lo(0x400000), %g3
+	add	%g5, %g3, %g5			/* New tte data */
+	andn	%g5, (_PAGE_G), %g5
+	sethi	%hi(KERNBASE+0x400000), %g3
+	or	%g3, %lo(KERNBASE+0x400000), %g3
+	mov	(62 << 3), %g7
+	stxa	%g3, [%l7] ASI_DMMU
+	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+	stxa	%g3, [%l7] ASI_IMMU
+	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+	flush	%g3
+	membar	#Sync
+	sethi	%hi(KERNBASE), %g3		/* Restore for fixup code below */
+2:	ba,pt	%xcc, 1f
+	 nop
+1:
+	set	sun4u_init, %g2
+	jmpl    %g2 + %g0, %g0
+	 nop
+
+sun4u_init:
+	/* Set ctx 0 */
+	mov	PRIMARY_CONTEXT, %g7
+	stxa	%g0, [%g7] ASI_DMMU
+	membar	#Sync
+
+	mov	SECONDARY_CONTEXT, %g7
+	stxa	%g0, [%g7] ASI_DMMU
+	membar	#Sync
+
+	/* We are now safely (we hope) in Nucleus context (0), rewrite
+	 * the KERNBASE TTE's so they no longer have the global bit set.
+	 * Don't forget to setup TAG_ACCESS first 8-)
+	 */
+	mov	TLB_TAG_ACCESS, %g2
+	stxa	%g3, [%g2] ASI_IMMU
+	stxa	%g3, [%g2] ASI_DMMU
+	membar	#Sync
+
+	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
+
+	ba,pt	%xcc, spitfire_tlb_fixup
+	 nop
+
+cheetah_tlb_fixup:
+	/* Clear the global bit on the locked KERNBASE entry (slot 15). */
+	set	(0 << 16) | (15 << 3), %g7
+	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g0
+	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
+	andn	%g1, (_PAGE_G), %g1
+	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+
+	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g0
+	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
+	andn	%g1, (_PAGE_G), %g1
+	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	/* Kill instruction prefetch queues. */
+	flush	%g3
+	membar	#Sync
+
+	/* Record the detected MMU flavor in the tlb_type variable:
+	 * 0 = spitfire (set elsewhere), 1 = cheetah, 2 = cheetah+.
+	 */
+	mov	2, %g2		/* Set TLB type to cheetah+. */
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
+
+	mov	1, %g2		/* Set TLB type to cheetah. */
+
+1:	sethi	%hi(tlb_type), %g1
+	stw	%g2, [%g1 + %lo(tlb_type)]
+
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
+	ba,pt	%xcc, 2f
+	 nop
+
+1:	/* Patch context register writes to support nucleus page
+	 * size correctly.
+	 */
+	call	cheetah_plus_patch_etrap
+	 nop
+	call	cheetah_plus_patch_rtrap
+	 nop
+	call	cheetah_plus_patch_fpdis
+	 nop
+	call	cheetah_plus_patch_winfixup
+	 nop
+	
+
+2:	/* Patch copy/page operations to cheetah optimized versions. */
+	call	cheetah_patch_copyops
+	 nop
+	call	cheetah_patch_cachetlbops
+	 nop
+
+	ba,pt	%xcc, tlb_fixup_done
+	 nop
+
+spitfire_tlb_fixup:
+	/* Clear the global bit on the locked KERNBASE entry (slot 63). */
+	mov	(63 << 3), %g7
+	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
+	andn	%g1, (_PAGE_G), %g1
+	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
+	membar	#Sync
+
+	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
+	andn	%g1, (_PAGE_G), %g1
+	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	/* Kill instruction prefetch queues. */
+	flush	%g3
+	membar	#Sync
+
+	/* Set TLB type to spitfire. */
+	mov	0, %g2
+	sethi	%hi(tlb_type), %g1
+	stw	%g2, [%g1 + %lo(tlb_type)]
+
+tlb_fixup_done:
+	/* Point %g6 at init's thread_info and %g4 at init's task struct,
+	 * saving the PROM stack pointer and cif handler for later.
+	 */
+	sethi	%hi(init_thread_union), %g6
+	or	%g6, %lo(init_thread_union), %g6
+	ldx	[%g6 + TI_TASK], %g4
+	mov	%sp, %l6
+	mov	%o4, %l7
+
+#if 0	/* We don't do it like this anymore, but for historical hack value
+	 * I leave this snippet here to show how crazy we can be sometimes. 8-)
+	 */
+
+	/* Setup "Linux Current Register", thanks Sun 8-) */
+	wr	%g0, 0x1, %pcr
+
+	/* Blackbird errata workaround.  See commentary in
+	 * smp.c:smp_percpu_timer_interrupt() for more
+	 * information.
+	 */
+	ba,pt	%xcc, 99f
+	 nop
+	.align	64
+99:	wr	%g6, %g0, %pic
+	rd	%pic, %g0
+#endif
+
+	/* Build the initial kernel stack at the top of init's thread area. */
+	wr	%g0, ASI_P, %asi
+	mov	1, %g1
+	sllx	%g1, THREAD_SHIFT, %g1
+	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+	add	%g6, %g1, %sp
+	mov	0, %fp
+
+	/* Set per-cpu pointer initially to zero, this makes
+	 * the boot-cpu use the in-kernel-image per-cpu areas
+	 * before setup_per_cpu_area() is invoked.
+	 */
+	clr	%g5
+
+	wrpr	%g0, 0, %wstate
+	wrpr	%g0, 0x0, %tl
+
+	/* Clear the bss */
+	sethi	%hi(__bss_start), %o0
+	or	%o0, %lo(__bss_start), %o0
+	sethi	%hi(_end), %o1
+	or	%o1, %lo(_end), %o1
+	call	__bzero
+	 sub	%o1, %o0, %o1
+
+	mov	%l6, %o1			! OpenPROM stack
+	call	prom_init
+	 mov	%l7, %o0			! OpenPROM cif handler
+
+	/* Off we go.... */
+	call	start_kernel
+	 nop
+	/* Not reached... */
+
+/* IMPORTANT NOTE: Whenever making changes here, check
+ * trampoline.S as well. -jj */
+	.globl	setup_tba
+setup_tba:	/* i0 = is_starfire */
+	save	%sp, -160, %sp
+
+	/* Remember where the PROM's trap table lives before we replace it. */
+	rdpr	%tba, %g7
+	sethi	%hi(prom_tba), %o1
+	or	%o1, %lo(prom_tba), %o1
+	stx	%g7, [%o1]
+
+	/* Setup "Linux" globals 8-) */
+	rdpr	%pstate, %o1
+	mov	%g6, %o2
+	wrpr	%o1, (PSTATE_AG|PSTATE_IE), %pstate
+	sethi	%hi(sparc64_ttable_tl0), %g1
+	wrpr	%g1, %tba
+	mov	%o2, %g6
+
+	/* Set up MMU globals */
+	wrpr	%o1, (PSTATE_MG|PSTATE_IE), %pstate
+
+	/* Set fixed globals used by dTLB miss handler. */
+#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
+#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+
+	mov	TSB_REG, %g1
+	stxa	%g0, [%g1] ASI_DMMU
+	membar	#Sync
+	stxa	%g0, [%g1] ASI_IMMU
+	membar	#Sync
+	mov	TLB_SFSR, %g1
+	sethi	%uhi(KERN_HIGHBITS), %g2
+	or	%g2, %ulo(KERN_HIGHBITS), %g2
+	sllx	%g2, 32, %g2
+	or	%g2, KERN_LOWBITS, %g2
+
+	BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
+	ba,pt	%xcc, spitfire_vpte_base
+	 nop
+
+cheetah_vpte_base:
+	sethi		%uhi(VPTE_BASE_CHEETAH), %g3
+	or		%g3, %ulo(VPTE_BASE_CHEETAH), %g3
+	ba,pt		%xcc, 2f
+	 sllx		%g3, 32, %g3
+
+spitfire_vpte_base:
+	sethi		%uhi(VPTE_BASE_SPITFIRE), %g3
+	or		%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
+	sllx		%g3, 32, %g3
+
+2:
+	clr	%g7
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
+
+	/* Kill PROM timer */
+	sethi	%hi(0x80000000), %o2
+	sllx	%o2, 32, %o2
+	wr	%o2, 0, %tick_cmpr
+
+	BRANCH_IF_ANY_CHEETAH(o2,o3,1f)
+
+	ba,pt	%xcc, 2f
+	 nop
+
+	/* Disable STICK_INT interrupts. */
+1:
+	sethi	%hi(0x80000000), %o2
+	sllx	%o2, 32, %o2
+	wr	%o2, %asr25
+
+	/* Ok, we're done setting up all the state our trap mechanims needs,
+	 * now get back into normal globals and let the PROM know what is up.
+	 */
+2:
+	wrpr	%g0, %g0, %wstate
+	wrpr	%o1, PSTATE_IE, %pstate
+
+	call	init_irqwork_curcpu
+	 nop
+
+	call	prom_set_trap_table
+	 sethi	%hi(sparc64_ttable_tl0), %o0
+
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
+	ba,pt	%xcc, 2f
+	 nop
+
+1:	/* Start using proper page size encodings in ctx register.  */
+	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
+	mov	PRIMARY_CONTEXT, %g1
+	sllx	%g3, 32, %g3
+	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+	or	%g3, %g2, %g3
+	stxa	%g3, [%g1] ASI_DMMU
+	membar	#Sync
+
+2:
+	/* Re-enable interrupts and return to the caller. */
+	rdpr	%pstate, %o1
+	or	%o1, PSTATE_IE, %o1
+	wrpr	%o1, 0, %pstate
+
+	ret
+	 restore
+
+/*
+ * The following skips make sure the trap table in ttable.S is aligned
+ * on a 32K boundary as required by the v9 specs for TBA register.
+ */
+sparc64_boot_end:
+	.skip	0x2000 + _start - sparc64_boot_end
+bootup_user_stack_end:
+	.skip	0x2000
+
+#ifdef CONFIG_SBUS
+/* This is just a hack to fool make depend config.h discovering
+   strategy: As the .S files below need config.h, but
+   make depend does not find it for them, we include config.h
+   in head.S */
+#endif
+
+! 0x0000000000408000
+
+#include "ttable.S"
+#include "systbls.S"
+
+	.align	1024
+	.globl	swapper_pg_dir
+swapper_pg_dir:
+	.word	0
+
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
+
+	/* This is just anal retentiveness on my part... */
+	.align	16384
+
+	.data
+	.align	8
+	.globl	prom_tba, tlb_type
+prom_tba:	.xword	0
+tlb_type:	.word	0	/* Must NOT end up in BSS */
+	.section	".fixup",#alloc,#execinstr
+	.globl	__ret_efault
+__ret_efault:
+	ret
+	 restore %g0, -EFAULT, %o0
+
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
new file mode 100644
index 0000000..3b6789e
--- /dev/null
+++ b/arch/sparc64/kernel/idprom.c
@@ -0,0 +1,49 @@
+/* $Id: idprom.c,v 1.3 1999/08/31 06:54:53 davem Exp $
+ * idprom.c: Routines to load the idprom into kernel addresses and
+ *           interpret the data contained within.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+
+struct idprom *idprom;
+static struct idprom idprom_buffer;
+
+/* Calculate the IDPROM checksum (xor of the data bytes).
+ *
+ * XORs together bytes 0x00..0x0E of the PROM image, i.e. everything up
+ * to (but not including) the checksum byte; a valid PROM satisfies
+ * id_cksum == calc_idprom_cksum(idprom).
+ *
+ * The parameter is named 'id' so it no longer shadows the file-scope
+ * 'struct idprom *idprom' pointer declared above (-Wshadow).
+ */
+static unsigned char __init calc_idprom_cksum(struct idprom *id)
+{
+	unsigned char cksum, i, *ptr = (unsigned char *)id;
+
+	for (i = cksum = 0; i <= 0x0E; i++)
+		cksum ^= *ptr++;
+
+	return cksum;
+}
+
+/* Create a local IDPROM copy and verify integrity.
+ *
+ * Snapshots the PROM contents into idprom_buffer, publishes it through
+ * the global 'idprom' pointer, warns (without failing) on an unknown
+ * format byte or a checksum mismatch, and prints the ethernet address.
+ */
+void __init idprom_init(void)
+{
+	unsigned char calced_cksum;
+
+	prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
+
+	idprom = &idprom_buffer;
+
+	if (idprom->id_format != 0x01)  {
+		prom_printf("IDPROM: Warning, unknown format type!\n");
+	}
+
+	/* Compute the checksum once; it was previously evaluated both in
+	 * the comparison and again in the warning printout.
+	 */
+	calced_cksum = calc_idprom_cksum(idprom);
+	if (idprom->id_cksum != calced_cksum) {
+		prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
+			    idprom->id_cksum, calced_cksum);
+	}
+
+	printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       idprom->id_ethaddr[0], idprom->id_ethaddr[1],
+	       idprom->id_ethaddr[2], idprom->id_ethaddr[3],
+	       idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+}
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
new file mode 100644
index 0000000..329b38f
--- /dev/null
+++ b/arch/sparc64/kernel/init_task.c
@@ -0,0 +1,35 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+/* Static bootstrap instances for the very first task.  Although the
+ * static ones look unreferenced here, they are wired up through the
+ * INIT_TASK() initializer macro below.
+ */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/* .text section in head.S is aligned at 2 page boundary and this gets linked
+ * right after that so that the init_thread_union is aligned properly as well.
+ * We really don't need this special alignment like the Intel does, but
+ * I do it anyways for completeness.
+ */
+__asm__ (".text");
+union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+EXPORT_SYMBOL(init_task);
+
+/* Switch back to .data so init_task itself is not placed in .text. */
+__asm__(".data");
+struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
new file mode 100644
index 0000000..43fc317
--- /dev/null
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -0,0 +1,597 @@
+/* $Id: ioctl32.c,v 1.136 2002/01/14 09:49:52 davem Exp $
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
+ * Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 2003  Pavel Machek (pavel@suse.cz)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
+#define INCLUDES
+#include "compat_ioctl.c"
+#include <linux/ncp_fs.h>
+#include <linux/syscalls.h>
+#include <asm/fbio.h>
+#include <asm/kbio.h>
+#include <asm/vuid_event.h>
+#include <asm/envctrl.h>
+#include <asm/display7seg.h>
+#include <asm/openpromio.h>
+#include <asm/audioio.h>
+#include <asm/watchdog.h>
+
+/* Use this to get at 32-bit user passed pointers. 
+ * See sys_sparc32.c for description about it.
+ */
+#define A(__x) compat_ptr(__x)
+
+/* Carve out 'len' bytes just below the current task's user stack
+ * pointer and return that user-space address; used for staging 64-bit
+ * ioctl argument structures where the callee can reach them.
+ */
+static __inline__ void *alloc_user_space(long len)
+{
+	struct pt_regs *regs = current_thread_info()->kregs;
+	unsigned long sp;
+
+	sp = regs->u_regs[UREG_I6];
+	/* A 64-bit process keeps its real stack STACK_BIAS above %i6. */
+	if (!test_thread_flag(TIF_32BIT))
+		sp += STACK_BIAS;
+
+	return (void *) (sp - len);
+}
+
+#define CODE
+#include "compat_ioctl.c"
+
+/* 32-bit layout of struct fbcmap: the red/green/blue component arrays
+ * are 32-bit user addresses rather than native pointers.
+ */
+struct  fbcmap32 {
+	int             index;          /* first element (0 origin) */
+	int             count;
+	u32		red;
+	u32		green;
+	u32		blue;
+};
+
+#define FBIOPUTCMAP32	_IOW('F', 3, struct fbcmap32)
+#define FBIOGETCMAP32	_IOW('F', 4, struct fbcmap32)
+
+/* Translate FBIOPUTCMAP32/FBIOGETCMAP32 into the native SPARC colormap
+ * ioctls: build a 64-bit struct fbcmap on the compat user stack,
+ * widening each 32-bit component address with compat_ptr(), then
+ * re-issue the ioctl against the new structure.
+ */
+static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct fbcmap32 __user *argp = (void __user *)arg;
+	struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;	/* compat user pointer, as in the other handlers here */
+	int ret;
+	
+	/* index and count are plain ints: copy them straight across. */
+	ret = copy_in_user(p, argp, 2 * sizeof(int));
+	ret |= get_user(addr, &argp->red);
+	ret |= put_user(compat_ptr(addr), &p->red);
+	ret |= get_user(addr, &argp->green);
+	ret |= put_user(compat_ptr(addr), &p->green);
+	ret |= get_user(addr, &argp->blue);
+	ret |= put_user(compat_ptr(addr), &p->blue);
+	if (ret)
+		return -EFAULT;
+	return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
+}
+
+/* 32-bit layout of struct fbcursor: embedded pointers (cmap components,
+ * image, mask) are 32-bit user addresses.
+ */
+struct fbcursor32 {
+	short set;		/* what to set, choose from the list above */
+	short enable;		/* cursor on/off */
+	struct fbcurpos pos;	/* cursor position */
+	struct fbcurpos hot;	/* cursor hot spot */
+	struct fbcmap32 cmap;	/* color map info */
+	struct fbcurpos size;	/* cursor bit map size */
+	u32	image;		/* cursor image bits */
+	u32	mask;		/* cursor mask bits */
+};
+	
+#define FBIOSCURSOR32	_IOW('F', 24, struct fbcursor32)
+#define FBIOGCURSOR32	_IOW('F', 25, struct fbcursor32)
+
+/* Translate FBIOSCURSOR32 to the native FBIOSCURSOR: rebuild the cursor
+ * structure on the compat user stack, widening every embedded 32-bit
+ * user address with compat_ptr().  Any faulting access yields -EFAULT.
+ */
+static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
+	struct fbcursor32 __user *argp =  (void __user *)arg;
+	compat_uptr_t addr;
+	int ret;
+	
+	/* set/enable/pos/hot have identical layout: bulk-copy them. */
+	ret = copy_in_user(p, argp,
+			      2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
+	ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
+	/* cmap.index and cmap.count are two ints; pointers follow below. */
+	ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
+	ret |= get_user(addr, &argp->cmap.red);
+	ret |= put_user(compat_ptr(addr), &p->cmap.red);
+	ret |= get_user(addr, &argp->cmap.green);
+	ret |= put_user(compat_ptr(addr), &p->cmap.green);
+	ret |= get_user(addr, &argp->cmap.blue);
+	ret |= put_user(compat_ptr(addr), &p->cmap.blue);
+	ret |= get_user(addr, &argp->mask);
+	ret |= put_user(compat_ptr(addr), &p->mask);
+	ret |= get_user(addr, &argp->image);
+	ret |= put_user(compat_ptr(addr), &p->image);
+	if (ret)
+		return -EFAULT;
+	return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
+}
+
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+/* This really belongs in include/linux/drm.h -DaveM */
+#include "../../../drivers/char/drm/drm.h"
+
+/* 32-bit layout of drm_version_t: the three string buffer pointers are
+ * 32-bit user addresses.
+ */
+typedef struct drm32_version {
+	int    version_major;	  /* Major version			    */
+	int    version_minor;	  /* Minor version			    */
+	int    version_patchlevel;/* Patch level			    */
+	int    name_len;	  /* Length of name buffer		    */
+	u32    name;		  /* Name of driver			    */
+	int    date_len;	  /* Length of date buffer		    */
+	u32    date;		  /* User-space buffer to hold date	    */
+	int    desc_len;	  /* Length of desc buffer		    */
+	u32    desc;		  /* User-space buffer to hold desc	    */
+} drm32_version_t;
+#define DRM32_IOCTL_VERSION    DRM_IOWR(0x00, drm32_version_t)
+
+/* Translate DRM32_IOCTL_VERSION: build a native drm_version_t on the
+ * compat user stack (lengths copied, buffer pointers widened), run the
+ * native ioctl, then copy the version numbers and updated lengths back
+ * into the 32-bit structure.
+ */
+static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
+	drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;
+	int n;
+	int ret;
+
+	/* The first three ints (major/minor/patchlevel) are outputs:
+	 * zero them in the native copy before the call.
+	 */
+	if (clear_user(p, 3 * sizeof(int)) ||
+	    get_user(n, &uversion->name_len) ||
+	    put_user(n, &p->name_len) ||
+	    get_user(addr, &uversion->name) ||
+	    put_user(compat_ptr(addr), &p->name) ||
+	    get_user(n, &uversion->date_len) ||
+	    put_user(n, &p->date_len) ||
+	    get_user(addr, &uversion->date) ||
+	    put_user(compat_ptr(addr), &p->date) ||
+	    get_user(n, &uversion->desc_len) ||
+	    put_user(n, &p->desc_len) ||
+	    get_user(addr, &uversion->desc) ||
+	    put_user(compat_ptr(addr), &p->desc))
+		return -EFAULT;
+
+        ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
+	if (ret)
+		return ret;
+
+	/* Propagate results: version triple plus the (possibly updated)
+	 * string lengths.  The buffers themselves were written in place.
+	 */
+	if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
+	    get_user(n, &p->name_len) ||
+	    put_user(n, &uversion->name_len) ||
+	    get_user(n, &p->date_len) ||
+	    put_user(n, &uversion->date_len) ||
+	    get_user(n, &p->desc_len) ||
+	    put_user(n, &uversion->desc_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* 32-bit layout of drm_unique_t: the name buffer is a 32-bit address. */
+typedef struct drm32_unique {
+	int	unique_len;	  /* Length of unique			    */
+	u32	unique;		  /* Unique name for driver instantiation   */
+} drm32_unique_t;
+#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
+#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
+
+/* Shared handler for GET_UNIQUE and SET_UNIQUE: widen the structure on
+ * the compat user stack, dispatch the matching native ioctl, then copy
+ * the (possibly updated) length back for the GET case.
+ */
+static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
+	drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;
+	int n;
+	int ret;
+
+	if (get_user(n, &uarg->unique_len) ||
+	    put_user(n, &p->unique_len) ||
+	    get_user(addr, &uarg->unique) ||
+	    put_user(compat_ptr(addr), &p->unique))
+		return -EFAULT;
+
+	if (cmd == DRM32_IOCTL_GET_UNIQUE)
+		ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
+	else
+		ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
+
+	if (ret)
+		return ret;
+
+	if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* 32-bit layout of drm_map_t: offset/size/handle are narrowed to u32. */
+typedef struct drm32_map {
+	u32		offset;	 /* Requested physical address (0 for SAREA)*/
+	u32		size;	 /* Requested physical size (bytes)	    */
+	drm_map_type_t	type;	 /* Type of memory to map		    */
+	drm_map_flags_t flags;	 /* Flags				    */
+	u32		handle;  /* User-space: "Handle" to pass to mmap    */
+				 /* Kernel-space: kernel-virtual address    */
+	int		mtrr;	 /* MTRR slot used			    */
+				 /* Private data			    */
+} drm32_map_t;
+#define DRM32_IOCTL_ADD_MAP    DRM_IOWR(0x15, drm32_map_t)
+
+/* Translate DRM32_IOCTL_ADD_MAP.  Unlike the other handlers, this one
+ * builds the native argument in a kernel-stack drm_map_t and issues the
+ * ioctl under set_fs(KERNEL_DS) so the native path accepts the kernel
+ * address; results are then copied back field by field (handle is
+ * truncated back to 32 bits for the compat caller).
+ */
+static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
+	drm_map_t karg;
+	mm_segment_t old_fs;
+	u32 tmp;
+	int ret;
+
+	ret  = get_user(karg.offset, &uarg->offset);
+	ret |= get_user(karg.size, &uarg->size);
+	ret |= get_user(karg.type, &uarg->type);
+	ret |= get_user(karg.flags, &uarg->flags);
+	ret |= get_user(tmp, &uarg->handle);
+	ret |= get_user(karg.mtrr, &uarg->mtrr);
+	if (ret)
+		return -EFAULT;
+
+	karg.handle = (void *) (unsigned long) tmp;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
+	set_fs(old_fs);
+
+	if (!ret) {
+		ret  = put_user(karg.offset, &uarg->offset);
+		ret |= put_user(karg.size, &uarg->size);
+		ret |= put_user(karg.type, &uarg->type);
+		ret |= put_user(karg.flags, &uarg->flags);
+		tmp = (u32) (long)karg.handle;
+		ret |= put_user(tmp, &uarg->handle);
+		ret |= put_user(karg.mtrr, &uarg->mtrr);
+		if (ret)
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/* 32-bit layout of drm_buf_info_t: the list pointer is a u32. */
+typedef struct drm32_buf_info {
+	int	       count;	/* Entries in list			     */
+	u32	       list;    /* (drm_buf_desc_t *) */ 
+} drm32_buf_info_t;
+#define DRM32_IOCTL_INFO_BUFS  DRM_IOWR(0x18, drm32_buf_info_t)
+
+/* Translate DRM32_IOCTL_INFO_BUFS: widen count/list onto the compat
+ * user stack, run the native ioctl, and copy the updated count back.
+ */
+static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
+	drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;
+	int n;
+	int ret;
+
+	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+		return -EFAULT;
+
+	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
+	if (ret)
+		return ret;
+
+	if (get_user(n, &p->count) || put_user(n, &uarg->count))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* 32-bit layout of drm_buf_free_t: the index-list pointer is a u32. */
+typedef struct drm32_buf_free {
+	int	       count;
+	u32	       list;	/* (int *) */
+} drm32_buf_free_t;
+#define DRM32_IOCTL_FREE_BUFS  DRM_IOW( 0x1a, drm32_buf_free_t)
+
+/* Translate DRM32_IOCTL_FREE_BUFS: a pure input ioctl, so only the
+ * forward widening is needed before re-issuing the native call.
+ */
+static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
+	drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;
+	int n;
+
+	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+		return -EFAULT;
+
+	return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
+}
+
+/* 32-bit layouts of drm_buf_pub_t / drm_buf_map_t: the address, virtual
+ * and list pointers are u32 user addresses.  Used by drm32_map_bufs().
+ */
+typedef struct drm32_buf_pub {
+	int		  idx;	       /* Index into master buflist	     */
+	int		  total;       /* Buffer size			     */
+	int		  used;	       /* Amount of buffer in use (for DMA)  */
+	u32		  address;     /* Address of buffer (void *)	     */
+} drm32_buf_pub_t;
+
+typedef struct drm32_buf_map {
+	int	      count;	/* Length of buflist			    */
+	u32	      virtual;	/* Mmaped area in user-virtual (void *)	    */
+	u32 	      list;	/* Buffer information (drm_buf_pub_t *)	    */
+} drm32_buf_map_t;
+#define DRM32_IOCTL_MAP_BUFS   DRM_IOWR(0x19, drm32_buf_map_t)
+
+static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
+	drm32_buf_pub_t __user *ulist;
+	drm_buf_map_t __user *arg64;
+	drm_buf_pub_t __user *list;
+	int orig_count, ret, i;
+	int n;
+	compat_uptr_t addr;
+
+	if (get_user(orig_count, &uarg->count))
+		return -EFAULT;
+
+	arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
+				(size_t)orig_count * sizeof(drm_buf_pub_t));
+	list = (void __user *)(arg64 + 1);
+
+	if (put_user(orig_count, &arg64->count) ||
+	    put_user(list, &arg64->list) ||
+	    get_user(addr, &uarg->virtual) ||
+	    put_user(compat_ptr(addr), &arg64->virtual) ||
+	    get_user(addr, &uarg->list))
+		return -EFAULT;
+
+	ulist = compat_ptr(addr);
+
+	for (i = 0; i < orig_count; i++) {
+		if (get_user(n, &ulist[i].idx) ||
+		    put_user(n, &list[i].idx) ||
+		    get_user(n, &ulist[i].total) ||
+		    put_user(n, &list[i].total) ||
+		    get_user(n, &ulist[i].used) ||
+		    put_user(n, &list[i].used) ||
+		    get_user(addr, &ulist[i].address) ||
+		    put_user(compat_ptr(addr), &list[i].address))
+			return -EFAULT;
+	}
+
+	ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < orig_count; i++) {
+		void __user *p;
+		if (get_user(n, &list[i].idx) ||
+		    put_user(n, &ulist[i].idx) ||
+		    get_user(n, &list[i].total) ||
+		    put_user(n, &ulist[i].total) ||
+		    get_user(n, &list[i].used) ||
+		    put_user(n, &ulist[i].used) ||
+		    get_user(p, &list[i].address) ||
+		    put_user((unsigned long)p, &ulist[i].address))
+			return -EFAULT;
+	}
+
+	if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
+		return -EFAULT;
+
+	return 0;
+}
+
typedef struct drm32_dma {
				/* Indices here refer to the offset into
				   buflist in drm_buf_get_t.  */
	int		context;	  /* Context handle		    */
	int		send_count;	  /* Number of buffers to send	    */
	u32		send_indices;	  /* List of handles to buffers (int *) */
	u32		send_sizes;	  /* Lengths of data to send (int *) */
	drm_dma_flags_t flags;		  /* Flags			    */
	int		request_count;	  /* Number of buffers requested    */
	int		request_size;	  /* Desired size for buffers	    */
	u32		request_indices;  /* Buffer information (int *)	    */
	u32		request_sizes;    /* (int *) */
	int		granted_count;	  /* Number of buffers granted	    */
} drm32_dma_t;
#define DRM32_IOCTL_DMA	     DRM_IOWR(0x29, drm32_dma_t)

/* RED PEN	The DRM layer blindly dereferences the send/request
 * 		index/size arrays even though they are userland
 * 		pointers.  -DaveM
 */
/* 32-bit compat wrapper for DRM_IOCTL_DMA.
 *
 * Scalar fields (context, send_count, flags, request_count,
 * request_size, granted_count) are copied between the two layouts with
 * copy_in_user(); the four u32 pointer fields are widened with
 * compat_ptr().  After the ioctl, the scalars are copied back -- the
 * index/size arrays themselves are written by the kernel directly into
 * the caller's buffers.  Returns 0, -EFAULT, or the ioctl's error.
 */
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
	drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int ret;

	/* First copy_in_user() moves context + send_count in one go;
	 * they are the two leading ints in both layouts.
	 */
	if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
	    get_user(addr, &uarg->send_indices) ||
	    put_user(compat_ptr(addr), &p->send_indices) ||
	    get_user(addr, &uarg->send_sizes) ||
	    put_user(compat_ptr(addr), &p->send_sizes) ||
	    copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
	    copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
	    get_user(addr, &uarg->request_indices) ||
	    put_user(compat_ptr(addr), &p->request_indices) ||
	    get_user(addr, &uarg->request_sizes) ||
	    put_user(compat_ptr(addr), &p->request_sizes) ||
	    copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
		return -EFAULT;

	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
	if (ret)
		return ret;

	if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
	    copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
	    copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
	    copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
	    copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
		return -EFAULT;

	return 0;
}
+
+typedef struct drm32_ctx_res {
+	int		count;
+	u32		contexts; /* (drm_ctx_t *) */
+} drm32_ctx_res_t;
+#define DRM32_IOCTL_RES_CTX    DRM_IOWR(0x26, drm32_ctx_res_t)
+
+static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
+	drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
+	compat_uptr_t addr;
+	int ret;
+
+	if (copy_in_user(p, uarg, sizeof(int)) ||
+	    get_user(addr, &uarg->contexts) ||
+	    put_user(compat_ptr(addr), &p->contexts))
+		return -EFAULT;
+
+	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
+	if (ret)
+		return ret;
+
+	if (copy_in_user(uarg, p, sizeof(int)))
+		return -EFAULT;
+
+	return 0;
+}
+
#endif

/* Every translation-table entry is (cmd, handler, next); handlers that
 * need no struct file get the extra parameter harmlessly via the cast
 * below.  NOTE(review): the cast silences the arity mismatch between
 * the 3-argument drm32_* handlers and this 4-argument type -- presumed
 * safe on this ABI, but verify if the calling convention ever changes.
 */
typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);

#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl32_handler_t)(handler), NULL },
#define IOCTL_TABLE_START \
	struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END \
	};

IOCTL_TABLE_START
#include <linux/compat_ioctl.h>
#define DECLARES
#include "compat_ioctl.c"
/* Terminal ioctls */
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
COMPATIBLE_IOCTL(TIOCSLTC)
/* Sun framebuffer ioctls */
COMPATIBLE_IOCTL(FBIOGTYPE)
COMPATIBLE_IOCTL(FBIOSATTR)
COMPATIBLE_IOCTL(FBIOGATTR)
COMPATIBLE_IOCTL(FBIOSVIDEO)
COMPATIBLE_IOCTL(FBIOGVIDEO)
COMPATIBLE_IOCTL(FBIOGCURSOR32)  /* This is not implemented yet. Later it should be converted... */
COMPATIBLE_IOCTL(FBIOSCURPOS)
COMPATIBLE_IOCTL(FBIOGCURPOS)
COMPATIBLE_IOCTL(FBIOGCURMAX)
/* Little k */
COMPATIBLE_IOCTL(KIOCTYPE)
COMPATIBLE_IOCTL(KIOCLAYOUT)
COMPATIBLE_IOCTL(KIOCGTRANS)
COMPATIBLE_IOCTL(KIOCTRANS)
COMPATIBLE_IOCTL(KIOCCMD)
COMPATIBLE_IOCTL(KIOCSDIRECT)
COMPATIBLE_IOCTL(KIOCSLED)
COMPATIBLE_IOCTL(KIOCGLED)
COMPATIBLE_IOCTL(KIOCSRATE)
COMPATIBLE_IOCTL(KIOCGRATE)
COMPATIBLE_IOCTL(VUIDSFORMAT)
COMPATIBLE_IOCTL(VUIDGFORMAT)
/* Little p: Sun /dev/rtc and environmental-control ioctls */
COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
COMPATIBLE_IOCTL(D7SIOCWR)
COMPATIBLE_IOCTL(D7SIOCTM)
/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
 * embedded pointers in the arg which we'd need to clean up...
 */
COMPATIBLE_IOCTL(OPROMGETOPT)
COMPATIBLE_IOCTL(OPROMSETOPT)
COMPATIBLE_IOCTL(OPROMNXTOPT)
COMPATIBLE_IOCTL(OPROMSETOPT2)
COMPATIBLE_IOCTL(OPROMNEXT)
COMPATIBLE_IOCTL(OPROMCHILD)
COMPATIBLE_IOCTL(OPROMGETPROP)
COMPATIBLE_IOCTL(OPROMNXTPROP)
COMPATIBLE_IOCTL(OPROMU2P)
COMPATIBLE_IOCTL(OPROMGETCONS)
COMPATIBLE_IOCTL(OPROMGETFBNAME)
COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
COMPATIBLE_IOCTL(OPROMSETCUR)
COMPATIBLE_IOCTL(OPROMPCI2NODE)
COMPATIBLE_IOCTL(OPROMPATH2NODE)
/* Big L */
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
/* Big A */
COMPATIBLE_IOCTL(AUDIO_GETINFO)
COMPATIBLE_IOCTL(AUDIO_SETINFO)
COMPATIBLE_IOCTL(AUDIO_DRAIN)
COMPATIBLE_IOCTL(AUDIO_GETDEV)
COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
COMPATIBLE_IOCTL(AUDIO_FLUSH)
COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* DRM ioctls whose argument layout is identical in 32- and 64-bit. */
COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
#endif /* DRM */
COMPATIBLE_IOCTL(WIOCSTART)
COMPATIBLE_IOCTL(WIOCSTOP)
COMPATIBLE_IOCTL(WIOCGSTAT)
/* And these ioctls need translation */
/* Note: SIOCRTMSG no longer exists, so dropping it here is safe --
 * the user would have seen just an -EINVAL anyways.
 */
HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* DRM ioctls with embedded pointers/layout differences; see the
 * drm32_* wrappers above.
 */
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
#endif /* DRM */
#if 0
HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_EPOCH_SET, do_rtc_ioctl)
#endif
/* take care of sizeof(sizeof()) breakage */
IOCTL_TABLE_END

int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 0000000..12c93a3
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,231 @@
+/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
+ * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include "iommu_common.h"
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+
+#ifdef VERIFY_SG
+static int verify_lengths(struct scatterlist *sg, int nents, int npages)
+{
+	int sg_len, dma_len;
+	int i, pgcount;
+
+	sg_len = 0;
+	for (i = 0; i < nents; i++)
+		sg_len += sg[i].length;
+
+	dma_len = 0;
+	for (i = 0; i < nents && sg[i].dma_length; i++)
+		dma_len += sg[i].dma_length;
+
+	if (sg_len != dma_len) {
+		printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
+		       sg_len, dma_len);
+		return -1;
+	}
+
+	pgcount = 0;
+	for (i = 0; i < nents && sg[i].dma_length; i++) {
+		unsigned long start, end;
+
+		start = sg[i].dma_address;
+		start = start & IO_PAGE_MASK;
+
+		end = sg[i].dma_address + sg[i].dma_length;
+		end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
+
+		pgcount += ((end - start) >> IO_PAGE_SHIFT);
+	}
+
+	if (pgcount != npages) {
+		printk("verify_lengths: Error, page count wrong, "
+		       "npages[%d] pgcount[%d]\n",
+		       npages, pgcount);
+		return -1;
+	}
+
+	/* This test passes... */
+	return 0;
+}
+
/* Debug check: walk one DMA segment (dma_sg) and verify that the SG
 * entries consumed by it line up with the IOPTEs mapping it.
 *
 * On entry *__sg and *__iopte point at the first unconsumed SG entry
 * and IOPTE; on exit they are advanced past everything this DMA
 * segment covered.  Returns the remaining nents, or -1 on a detected
 * inconsistency (after printing a diagnostic).
 */
static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
{
	struct scatterlist *sg = *__sg;
	iopte_t *iopte = *__iopte;
	u32 dlen = dma_sg->dma_length;
	u32 daddr;
	unsigned int sglen;
	unsigned long sgaddr;

	daddr = dma_sg->dma_address;
	sglen = sg->length;
	sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
	while (dlen > 0) {
		unsigned long paddr;

		/* SG and DMA_SG must begin at the same sub-page boundary. */
		if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
			printk("verify_one_map: Wrong start offset "
			       "sg[%08lx] dma[%08x]\n",
			       sgaddr, daddr);
			nents = -1;
			goto out;
		}

		/* Verify the IOPTE points to the right page. */
		paddr = iopte_val(*iopte) & IOPTE_PAGE;
		if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
			printk("verify_one_map: IOPTE[%08lx] maps the "
			       "wrong page, should be [%08lx]\n",
			       iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
			nents = -1;
			goto out;
		}

		/* If this SG crosses a page, adjust to that next page
		 * boundary and loop.
		 */
		if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
			unsigned long next_page, diff;

			next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
			diff = next_page - sgaddr;
			sgaddr += diff;
			daddr += diff;
			sglen -= diff;
			dlen -= diff;
			if (dlen > 0)
				iopte++;
			continue;
		}

		/* SG wholly consumed within this page. */
		daddr += sglen;
		dlen -= sglen;

		/* Crossing into a fresh IO page on the DMA side means the
		 * next IOPTE takes over.
		 */
		if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
			iopte++;

		sg++;
		if (--nents <= 0)
			break;
		sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
		sglen = sg->length;
	}
	if (dlen < 0) {
		/* Transfer overrun, big problems. */
		printk("verify_one_map: Transfer overrun by %d bytes.\n",
		       -dlen);
		nents = -1;
	} else {
		/* Advance to next dma_sg implies that the next iopte will
		 * begin it.
		 */
		iopte++;
	}

out:
	*__sg = sg;
	*__iopte = iopte;
	return nents;
}
+
+static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
+{
+	struct scatterlist *dma_sg = sg;
+	struct scatterlist *orig_dma_sg = dma_sg;
+	int orig_nents = nents;
+
+	for (;;) {
+		nents = verify_one_map(dma_sg, &sg, nents, &iopte);
+		if (nents <= 0)
+			break;
+		dma_sg++;
+		if (dma_sg->dma_length == 0)
+			break;
+	}
+
+	if (nents > 0) {
+		printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
+		       nents);
+		return -1;
+	}
+
+	if (nents < 0) {
+		printk("verify_maps: Error, messed up mappings, "
+		       "at sg %d dma_sg %d\n",
+		       (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
+		return -1;
+	}
+
+	/* This test passes... */
+	return 0;
+}
+
/* Debug entry point: run both consistency checks over a freshly
 * mapped SG list and dump the whole list if either fails.  Compiled
 * only under VERIFY_SG.
 */
void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages)
{
	if (verify_lengths(sg, nents, npages) < 0 ||
	    verify_maps(sg, nents, iopte) < 0) {
		int i;

		printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
		printk("%016lx.\n", sg->dma_address & IO_PAGE_MASK);

		/* NOTE(review): dma_address/dma_length are printed with
		 * %016lx -- confirm these fields are long-sized on this
		 * arch, otherwise the format specifiers are mismatched.
		 */
		for (i = 0; i < nents; i++) {
			printk("sg(%d): page_addr(%p) off(%x) length(%x) "
			       "dma_address[%016lx] dma_length[%016lx]\n",
			       i,
			       page_address(sg[i].page), sg[i].offset,
			       sg[i].length,
			       sg[i].dma_address, sg[i].dma_length);
		}
	}

	/* Seems to be ok */
}
+#endif
+
/* Coalesce virtually-contiguous SG entries into DMA segments.
 *
 * Walks the nents-entry list, merging runs of entries that satisfy
 * VCONTIG() into a single (dma_address, dma_length) pair written back
 * into the list in-place (dma_address holds only the sub-IO-page
 * offset at this point; the real base is filled in by the caller once
 * IOMMU space is allocated).  Returns the number of IO pages the
 * mapping will occupy.
 */
unsigned long prepare_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *dma_sg = sg;
	unsigned long prev;
	u32 dent_addr, dent_len;

	/* Seed the first segment from entry 0: its length, its end
	 * address (prev), and its offset within an IO page.
	 */
	prev  = (unsigned long) (page_address(sg->page) + sg->offset);
	prev += (unsigned long) (dent_len = sg->length);
	dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
			   & (IO_PAGE_SIZE - 1UL));
	while (--nents) {
		unsigned long addr;

		sg++;
		addr = (unsigned long) (page_address(sg->page) + sg->offset);
		if (! VCONTIG(prev, addr)) {
			/* Discontinuity: close the current segment and
			 * start a new one on the next IO-page boundary,
			 * preserving the new entry's sub-page offset.
			 */
			dma_sg->dma_address = dent_addr;
			dma_sg->dma_length = dent_len;
			dma_sg++;

			dent_addr = ((dent_addr +
				      dent_len +
				      (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
			dent_addr <<= IO_PAGE_SHIFT;
			dent_addr += addr & (IO_PAGE_SIZE - 1UL);
			dent_len = 0;
		}
		dent_len += sg->length;
		prev = addr + sg->length;
	}
	/* Close the final segment. */
	dma_sg->dma_address = dent_addr;
	dma_sg->dma_length = dent_len;

	return ((unsigned long) dent_addr +
		(unsigned long) dent_len +
		(IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
new file mode 100644
index 0000000..ad79101
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -0,0 +1,48 @@
+/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
+ * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/iommu.h>
+#include <asm/scatterlist.h>
+
+/*
+ * These give mapping size of each iommu pte/tlb.
+ */
+#define IO_PAGE_SHIFT			13
+#define IO_PAGE_SIZE			(1UL << IO_PAGE_SHIFT)
+#define IO_PAGE_MASK			(~(IO_PAGE_SIZE-1))
+#define IO_PAGE_ALIGN(addr)		(((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK)
+
+#define IO_TSB_ENTRIES			(128*1024)
+#define IO_TSB_SIZE			(IO_TSB_ENTRIES * 8)
+
+/*
+ * This is the hardwired shift in the iotlb tag/data parts.
+ */
+#define IOMMU_PAGE_SHIFT		13
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+#undef VERIFY_SG
+
+#ifdef VERIFY_SG
+extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages);
+#endif
+
+/* Two addresses are "virtually contiguous" if and only if:
+ * 1) They are equal, or...
+ * 2) They are both on a page boundary
+ */
+#define VCONTIG(__X, __Y)	(((__X) == (__Y)) || \
+				 (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
+
+extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 0000000..a38cb50
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,1269 @@
+/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
+ * irq.c: UltraSparc IRQ handling/init/registry.
+ *
+ * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
+ * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
+ * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/sbus.h>
+#include <asm/iommu.h>
+#include <asm/upa.h>
+#include <asm/oplib.h>
+#include <asm/timer.h>
+#include <asm/smp.h>
+#include <asm/starfire.h>
+#include <asm/uaccess.h>
+#include <asm/cache.h>
+#include <asm/cpudata.h>
+
+#ifdef CONFIG_SMP
+static void distribute_irqs(void);
+#endif
+
+/* UPA nodes send interrupt packet to UltraSparc with first data reg
+ * value low 5 (7 on Starfire) bits holding the IRQ identifier being
+ * delivered.  We must translate this into a non-vector IRQ so we can
+ * set the softint on this cpu.
+ *
+ * To make processing these packets efficient and race free we use
+ * an array of irq buckets below.  The interrupt vector handler in
+ * entry.S feeds incoming packets into per-cpu pil-indexed lists.
+ * The IVEC handler does not need to act atomically, the PIL dispatch
+ * code uses CAS to get an atomic snapshot of the list and clear it
+ * at the same time.
+ */
+
/* One bucket per interrupt vector; cache-line aligned because it is
 * touched from the trap-level interrupt path.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];	/* one list head per PIL */
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif
+
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
/* Fallback storage for SA_STATIC_ALLOC registrations that must not
 * depend on kmalloc being usable.
 */
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its' accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * Upper 2b of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
/* 0xdead marks an action attached to the PIL0 dummy bucket, which has
 * no real INO.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	unsigned long flags;
+	int i = *(loff_t *) v;
+	struct irqaction *action;
+#ifdef CONFIG_SMP
+	int j;
+#endif
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+	if (i <= NR_IRQS) {
+		if (!(action = *(i + irq_action)))
+			goto out_unlock;
+		seq_printf(p, "%3d: ", i);
+#ifndef CONFIG_SMP
+		seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+		for (j = 0; j < NR_CPUS; j++) {
+			if (!cpu_online(j))
+				continue;
+			seq_printf(p, "%10u ",
+				   kstat_cpu(j).irqs[i]);
+		}
+#endif
+		seq_printf(p, " %s:%lx", action->name,
+			   get_ino_in_irqaction(action));
+		for (action = action->next; action; action = action->next) {
+			seq_printf(p, ", %s:%lx", action->name,
+				   get_ino_in_irqaction(action));
+		}
+		seq_putc(p, '\n');
+	}
+out_unlock:
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+	return 0;
+}
+
/* Now these are always passed a true fully specified sun4u INO. */
/* Enable delivery of an interrupt by writing a valid target CPU id
 * into its IMAP register.  The target id encoding depends on the CPU
 * model: JBUS id, Safari AID, UPA MID, or a Starfire translation.
 */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	/* smp_processor_id() below must not migrate. */
	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}
+
+/* This now gets passed true ino's as well. */
+void disable_irq(unsigned int irq)
+{
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long imap;
+
+	imap = bucket->imap;
+	if (imap != 0UL) {
+		u32 tmp;
+
+		/* NOTE: We do not want to futz with the IRQ clear registers
+		 *       and move the state to IDLE, the SCSI code does call
+		 *       disable_irq() to assure atomicity in the queue cmd
+		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
+		 */
+		tmp = upa_readl(imap);
+		tmp &= ~IMAP_VALID;
+		upa_writel(tmp, imap);
+	}
+}
+
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
/* All-zero positional initializer; the per-field comments below name
 * the struct ino_bucket members in declaration order.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};
+
+unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
+{
+	struct ino_bucket *bucket;
+	int ino;
+
+	if (pil == 0) {
+		if (iclr != 0UL || imap != 0UL) {
+			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
+				    iclr, imap);
+			prom_halt();
+		}
+		return __irq(&pil0_dummy_bucket);
+	}
+
+	/* RULE: Both must be specified in all other cases. */
+	if (iclr == 0UL || imap == 0UL) {
+		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
+			    pil, inofixup, iclr, imap);
+		prom_halt();
+	}
+	
+	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
+	if (ino > NUM_IVECS) {
+		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
+			    ino, pil, inofixup, iclr, imap);
+		prom_halt();
+	}
+
+	/* Ok, looks good, set it up.  Don't touch the irq_chain or
+	 * the pending flag.
+	 */
+	bucket = &ivector_table[ino];
+	if ((bucket->flags & IBF_ACTIVE) ||
+	    (bucket->irq_info != NULL)) {
+		/* This is a gross fatal error if it happens here. */
+		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
+		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
+			    ino, pil, inofixup, iclr, imap);
+		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
+			    bucket->pil, bucket->iclr, bucket->imap);
+		prom_printf("IRQ: Cannot continue, halting...\n");
+		prom_halt();
+	}
+	bucket->imap  = imap;
+	bucket->iclr  = iclr;
+	bucket->pil   = pil;
+	bucket->flags = 0;
+
+	bucket->irq_info = NULL;
+
+	return __irq(bucket);
+}
+
/* Push a bucket onto this CPU's per-PIL work list.  Interrupts are
 * disabled around the insert (PSTATE.IE cleared, then the saved
 * pstate restored) so the list cannot be concurrently consumed by the
 * interrupt vector handler on this CPU.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
+
/* Register an interrupt handler on the given irq cookie.
 *
 * @irq:      cookie previously returned by build_irq().
 * @handler:  handler function, must be non-NULL.
 * @irqflags: SA_* flags (SA_SHIRQ, SA_STATIC_ALLOC, SA_SAMPLE_RANDOM).
 * @name:     name shown in /proc/interrupts.
 * @dev_id:   per-registration cookie used to distinguish shared handlers.
 *
 * Returns 0 on success, -EINVAL for a bad cookie or NULL handler,
 * -EBUSY if sharing is not possible, -ENOMEM on allocation failure.
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	/* Reject cookies that are not the dummy bucket and do not point
	 * inside ivector_table[] -- i.e. old-style raw IRQ numbers.
	 */
	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}	
	if (!handler)
	    return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
	 	 * This function might sleep, we want to call it first,
	 	 * outside of the atomic block. In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
	 	 * Yes, this might clear the entropy pool if the wrong
	 	 * driver is attempted to be loaded, without actually
	 	 * installing a new handler, but is this really a problem,
	 	 * only the sysadmin is able to do this.
	 	 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	/* Existing registrations for this PIL: allow only if both old
	 * and new agree to share, and remember the tail for linking.
	 */
	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
	    if (static_irq_count < MAX_STATIC_ALLOC)
		action = &static_irqaction[static_irq_count++];
	    else
		printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
		       "using kmalloc\n", irq, name);
	}	
	if (action == NULL)
	    action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						 GFP_ATOMIC);
	
	if (!action) { 
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				/* First sharer: promote irq_info to a
				 * 4-slot handler vector.
				 */
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				/* NOTE(review): GFP_ATOMIC does not sleep,
				 * so this re-check looks vestigial --
				 * confirm before removing.
				 */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				/* Already multi: take the first free slot. */
				int ent;

				vector = (void **)orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		/* An IVEC may have arrived before any handler existed;
		 * consume the pending flag so we can replay it below.
		 */
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

/* NOTE(review): these paths kfree(action); if action came from
 * static_irqaction (SA_STATIC_ALLOC) that would kfree non-heap memory
 * -- verify the static path cannot reach here.
 */
free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}
+
+EXPORT_SYMBOL(request_irq);
+
+/* Release an interrupt previously obtained with request_irq().
+ *
+ * 'irq' is the opaque bucket cookie handed back at request time and
+ * 'dev_id' selects the action to remove on a shared line.  Undoes the
+ * IBF_MULTI sharing vector built by request_irq() and disables the
+ * hardware IMAP once no other active source still uses it.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+	struct irqaction *action;
+	struct irqaction *tmp = NULL;
+	unsigned long flags;
+	struct ino_bucket *bucket = __bucket(irq), *bp;
+
+	/* Catch callers passing a raw IRQ number instead of a bucket
+	 * cookie; the PIL0 dummy bucket is the one legal exception.
+	 */
+	if ((bucket != &pil0_dummy_bucket) &&
+	    (bucket < &ivector_table[0] ||
+	     bucket >= &ivector_table[NUM_IVECS])) {
+		unsigned int *caller;
+
+		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
+		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
+		       "from %p, irq %08x.\n", caller, irq);
+		return;
+	}
+	
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	action = *(bucket->pil + irq_action);
+	if (!action || !action->handler) {
+		printk("Freeing free IRQ %d\n", bucket->pil);
+		/* BUG fix: this error path used to return with
+		 * irq_action_lock still held (and could also oops on a
+		 * NULL action list head).
+		 */
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+	if (dev_id) {
+		/* Shared line: find the action matching dev_id, with
+		 * 'tmp' trailing as its list predecessor for unlinking.
+		 */
+		for ( ; action; action = action->next) {
+			if (action->dev_id == dev_id)
+				break;
+			tmp = action;
+		}
+		if (!action) {
+			printk("Trying to free free shared IRQ %d\n", bucket->pil);
+			spin_unlock_irqrestore(&irq_action_lock, flags);
+			return;
+		}
+	} else if (action->flags & SA_SHIRQ) {
+		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+
+	if (action->flags & SA_STATIC_ALLOC) {
+		printk("Attempt to free statically allocated IRQ %d (%s)\n",
+		       bucket->pil, action->name);
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+
+	/* Unlink from the per-PIL action list. */
+	if (action && tmp)
+		tmp->next = action->next;
+	else
+		*(bucket->pil + irq_action) = action->next;
+
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+	/* Wait for any in-flight handler invocation to complete before
+	 * tearing down bucket state (see IBF_INPROGRESS).
+	 */
+	synchronize_irq(irq);
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	if (bucket != &pil0_dummy_bucket) {
+		unsigned long imap = bucket->imap;
+		void **vector, *orig;
+		int ent;
+
+		orig = bucket->irq_info;
+		vector = (void **)orig;
+
+		if ((bucket->flags & IBF_MULTI) != 0) {
+			/* Shared bucket: clear our slot and remember any
+			 * remaining user so we can collapse back to the
+			 * non-shared representation.
+			 */
+			int other = 0;
+			void *orphan = NULL;
+			for (ent = 0; ent < 4; ent++) {
+				if (vector[ent] == action)
+					vector[ent] = NULL;
+				else if (vector[ent] != NULL) {
+					orphan = vector[ent];
+					other++;
+				}
+			}
+
+			/* Only free when no other shared irq
+			 * uses this bucket.
+			 */
+			if (other) {
+				if (other == 1) {
+					/* Convert back to non-shared bucket. */
+					bucket->irq_info = orphan;
+					bucket->flags &= ~(IBF_MULTI);
+					kfree(vector);
+				}
+				goto out;
+			}
+		} else {
+			bucket->irq_info = NULL;
+		}
+
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
+
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			bp = &ivector_table[ent];
+			if (bp != bucket	&&
+			    bp->imap == imap	&&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
+		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
+	}
+
+out:
+	kfree(action);
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+}
+
+EXPORT_SYMBOL(free_irq);
+
+#ifdef CONFIG_SMP
+/* Spin until no cpu is currently executing a handler for 'irq'.
+ * handler_irq() brackets each handler invocation with IBF_INPROGRESS,
+ * which is the flag polled here.
+ */
+void synchronize_irq(unsigned int irq)
+{
+	struct ino_bucket *bucket = __bucket(irq);
+
+#if 0
+	/* The following is how I wish I could implement this.
+	 * Unfortunately the ICLR registers are read-only, you can
+	 * only write ICLR_foo values to them.  To get the current
+	 * IRQ status you would need to get at the IRQ diag registers
+	 * in the PCI/SBUS controller and the layout of those vary
+	 * from one controller to the next, sigh... -DaveM
+	 */
+	unsigned long iclr = bucket->iclr;
+
+	while (1) {
+		u32 tmp = upa_readl(iclr);
+		
+		if (tmp == ICLR_TRANSMIT ||
+		    tmp == ICLR_PENDING) {
+			cpu_relax();
+			continue;
+		}
+		break;
+	}
+#else
+	/* So we have to do this with a INPROGRESS bit just like x86.  */
+	while (bucket->flags & IBF_INPROGRESS)
+		cpu_relax();
+#endif
+}
+#endif /* CONFIG_SMP */
+
+/* An interrupt vector arrived for a bucket with no active handler.
+ * Record it in bucket->pending so request_irq() can replay it later,
+ * and clear this cpu's irq_work word so the slot can be reused.
+ */
+void catch_disabled_ivec(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
+
+	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
+	 * to other devices.  Here a single IMAP enabled potentially multiple
+	 * unique interrupt sources (which each do have a unique ICLR register.
+	 *
+	 * So what we do is just register that the IVEC arrived, when registered
+	 * for real the request_irq() code will check the bit and signal
+	 * a local CPU interrupt for it.
+	 */
+#if 0
+	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
+	       bucket - &ivector_table[0], regs->tpc);
+#endif
+	*irq_work(cpu, 0) = 0;
+	bucket->pending = 1;
+}
+
+/* Tune this... */
+#define FORWARD_VOLUME		12
+
+#ifdef CONFIG_SMP
+
+/* Heuristically push the interrupt source 'bp' towards a neighbouring
+ * cpu by rewriting its IMAP target; called at most once per trap from
+ * handler_irq().
+ */
+static inline void redirect_intr(int cpu, struct ino_bucket *bp)
+{
+	/* Ok, here is what is going on:
+	 * 1) Retargeting IRQs on Starfire is very
+	 *    expensive so just forget about it on them.
+	 * 2) Moving around very high priority interrupts
+	 *    is a losing game.
+	 * 3) If the current cpu is idle, interrupts are
+	 *    useful work, so keep them here.  But do not
+	 *    pass to our neighbour if he is not very idle.
+	 * 4) If sysadmin explicitly asks for directed intrs,
+	 *    Just Do It.
+	 */
+	struct irqaction *ap = bp->irq_info;
+	cpumask_t cpu_mask;
+	unsigned int buddy, ticks;
+
+	/* Restrict candidates to the user-set affinity, falling back
+	 * to all online cpus when that mask is empty.
+	 */
+	cpu_mask = get_smpaff_in_irqaction(ap);
+	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+	if (cpus_empty(cpu_mask))
+		cpu_mask = cpu_online_map;
+
+	if (this_is_starfire != 0 ||
+	    bp->pil >= 10 || current->pid == 0)
+		goto out;
+
+	/* 'cpu' is the MID (ie. UPAID), calculate the MID
+	 * of our buddy.
+	 */
+	buddy = cpu + 1;
+	if (buddy >= NR_CPUS)
+		buddy = 0;
+
+	ticks = 0;
+	while (!cpu_isset(buddy, cpu_mask)) {
+		if (++buddy >= NR_CPUS)
+			buddy = 0;
+		/* No cpu in the mask exists: drop the stale affinity. */
+		if (++ticks > NR_CPUS) {
+			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
+			goto out;
+		}
+	}
+
+	if (buddy == cpu)
+		goto out;
+
+	/* Voo-doo programming. */
+	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
+		goto out;
+
+	/* This just so happens to be correct on Cheetah
+	 * at the moment.
+	 */
+	buddy <<= 26;
+
+	/* Push it to our buddy. */
+	upa_writel(buddy | IMAP_VALID, bp->imap);
+
+out:
+	return;
+}
+
+#endif
+
+/* Top-level PIL interrupt dispatch, entered from the trap table.
+ * 'irq' is the PIL level; the per-cpu irq_work word at that level
+ * holds a chain of ino_buckets whose vectors have arrived.
+ */
+void handler_irq(int irq, struct pt_regs *regs)
+{
+	struct ino_bucket *bp, *nbp;
+	int cpu = smp_processor_id();
+
+#ifndef CONFIG_SMP
+	/*
+	 * Check for TICK_INT on level 14 softint.
+	 */
+	{
+		unsigned long clr_mask = 1 << irq;
+		unsigned long tick_mask = tick_ops->softint_mask;
+
+		if ((irq == 14) && (get_softint() & tick_mask)) {
+			irq = 0;
+			clr_mask = tick_mask;
+		}
+		clear_softint(clr_mask);
+	}
+#else
+	int should_forward = 1;
+
+	clear_softint(1 << irq);
+#endif
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+
+	/* Sliiiick... */
+#ifndef CONFIG_SMP
+	bp = ((irq != 0) ?
+	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
+	      &pil0_dummy_bucket);
+#else
+	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
+#endif
+	/* Walk the bucket chain atomically snapped by the xchg above. */
+	for ( ; bp != NULL; bp = nbp) {
+		unsigned char flags = bp->flags;
+		unsigned char random = 0;
+
+		/* Unlink before running handlers; the source may be
+		 * re-queued while we are inside a handler.
+		 */
+		nbp = __bucket(bp->irq_chain);
+		bp->irq_chain = 0;
+
+		/* Guards the spin loop in synchronize_irq(). */
+		bp->flags |= IBF_INPROGRESS;
+
+		if ((flags & IBF_ACTIVE) != 0) {
+#ifdef CONFIG_PCI
+			if ((flags & IBF_DMA_SYNC) != 0) {
+				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
+				upa_readq(pci_dma_wsync);
+			}
+#endif
+			if ((flags & IBF_MULTI) == 0) {
+				struct irqaction *ap = bp->irq_info;
+				int ret;
+
+				ret = ap->handler(__irq(bp), ap->dev_id, regs);
+				if (ret == IRQ_HANDLED)
+					random |= ap->flags;
+			} else {
+				/* Shared bucket: up to 4 actions. */
+				void **vector = (void **)bp->irq_info;
+				int ent;
+				for (ent = 0; ent < 4; ent++) {
+					struct irqaction *ap = vector[ent];
+					if (ap != NULL) {
+						int ret;
+
+						ret = ap->handler(__irq(bp),
+								  ap->dev_id,
+								  regs);
+						if (ret == IRQ_HANDLED)
+							random |= ap->flags;
+					}
+				}
+			}
+			/* Only the dummy bucket lacks IMAP/ICLR. */
+			if (bp->pil != 0) {
+#ifdef CONFIG_SMP
+				/* Retarget at most one source per trap. */
+				if (should_forward) {
+					redirect_intr(cpu, bp);
+					should_forward = 0;
+				}
+#endif
+				upa_writel(ICLR_IDLE, bp->iclr);
+
+				/* Test and add entropy */
+				if (random & SA_SAMPLE_RANDOM)
+					add_interrupt_randomness(irq);
+			}
+		} else
+			bp->pending = 1;
+
+		bp->flags &= ~IBF_INPROGRESS;
+	}
+	irq_exit();
+}
+
+#ifdef CONFIG_BLK_DEV_FD
+extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
+
+/* Thin wrapper dispatching the floppy interrupt: clears the per-cpu
+ * work word, marks the bucket in progress around floppy_interrupt(),
+ * and acks the hardware via ICLR.
+ */
+void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
+{
+	struct irqaction *action = *(irq + irq_action);
+	struct ino_bucket *bucket;
+	int cpu = smp_processor_id();
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+
+	*(irq_work(cpu, irq)) = 0;
+	bucket = get_ino_in_irqaction(action) + ivector_table;
+
+	/* Guards the spin loop in synchronize_irq(). */
+	bucket->flags |= IBF_INPROGRESS;
+
+	floppy_interrupt(irq, dev_cookie, regs);
+	upa_writel(ICLR_IDLE, bucket->iclr);
+
+	bucket->flags &= ~IBF_INPROGRESS;
+
+	irq_exit();
+}
+#endif
+
+/* The following assumes that the branch lies before the place we
+ * are branching to.  This is the case for a trap vector...
+ * You have been warned.
+ */
+#define SPARC_BRANCH(dest_addr, inst_addr) \
+          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
+
+#define SPARC_NOP (0x01000000)
+
+/* Patch the TL0 trap table so PIL 'cpu_irq' vectors straight into
+ * 'handler': 0x820 is the byte offset of trap entry 0x41 (interrupt
+ * level 1) and each entry is 32 bytes, so the branch/nop pair is
+ * written over the entry for the requested level.  The membar+flush
+ * makes the instruction cache see the new code.
+ */
+static void install_fast_irq(unsigned int cpu_irq,
+			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
+{
+	extern unsigned long sparc64_ttable_tl0;
+	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
+	unsigned int *insns;
+
+	ttent += 0x820;
+	ttent += (cpu_irq - 1) << 5;
+	insns = (unsigned int *) ttent;
+	insns[0] = SPARC_BRANCH(((unsigned long) handler),
+				((unsigned long)&insns[0]));
+	insns[1] = SPARC_NOP;
+	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
+}
+
+/* Register a "fast" interrupt whose handler is branched to directly
+ * from the trap table (see install_fast_irq()).  Unlike request_irq()
+ * there is no sharing, no entropy sampling and no affinity support.
+ * Returns 0 on success or a negative errno.
+ */
+int request_fast_irq(unsigned int irq,
+		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		     unsigned long irqflags, const char *name, void *dev_id)
+{
+	struct irqaction *action;
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long flags;
+
+	/* No pil0 dummy buckets allowed here. */
+	if (bucket < &ivector_table[0] ||
+	    bucket >= &ivector_table[NUM_IVECS]) {
+		unsigned int *caller;
+
+		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
+		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
+		       "from %p, irq %08x.\n", caller, irq);
+		return -EINVAL;
+	}	
+	
+	if (!handler)
+		return -EINVAL;
+
+	/* PIL 0 (dummy) and 14 (timer) cannot take fast handlers. */
+	if ((bucket->pil == 0) || (bucket->pil == 14)) {
+		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	action = *(bucket->pil + irq_action);
+	if (action) {
+		if (action->flags & SA_SHIRQ)
+			panic("Trying to register fast irq when already shared.\n");
+		if (irqflags & SA_SHIRQ)
+			panic("Trying to register fast irq as shared.\n");
+		printk("request_fast_irq: Trying to register yet already owned.\n");
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -EBUSY;
+	}
+
+	/*
+	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
+	 * support smp intr affinity in this path.
+	 */
+	if (irqflags & SA_STATIC_ALLOC) {
+		if (static_irq_count < MAX_STATIC_ALLOC)
+			action = &static_irqaction[static_irq_count++];
+		else
+			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
+			       "using kmalloc\n", bucket->pil, name);
+	}
+	/* Fall back to dynamic allocation when no static slot was used. */
+	if (action == NULL)
+		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+						     GFP_ATOMIC);
+	if (!action) {
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -ENOMEM;
+	}
+	install_fast_irq(bucket->pil, handler);
+
+	bucket->irq_info = action;
+	bucket->flags |= IBF_ACTIVE;
+
+	action->handler = handler;
+	action->flags = irqflags;
+	action->dev_id = NULL;
+	action->name = name;
+	action->next = NULL;
+	put_ino_in_irqaction(action, irq);
+	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
+
+	*(bucket->pil + irq_action) = action;
+	enable_irq(irq);
+
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+#ifdef CONFIG_SMP
+	distribute_irqs();
+#endif
+	return 0;
+}
+
+/* We really don't need these at all on the Sparc.  We only have
+ * stubs here because they are exported to modules.
+ */
+unsigned long probe_irq_on(void)
+{
+	/* IRQ autoprobing is unnecessary on sparc64; stub for modules. */
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off(unsigned long mask)
+{
+	/* See probe_irq_on(): autoprobing is a no-op here. */
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+#ifdef CONFIG_SMP
+/* Point the interrupt source behind 'p' at 'goal_cpu' (skipping past
+ * offline cpus first), then return the next online cpu so that
+ * distribute_irqs() round-robins successive sources.
+ */
+static int retarget_one_irq(struct irqaction *p, int goal_cpu)
+{
+	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
+	unsigned long imap = bucket->imap;
+	unsigned int tid;
+
+	/* Advance to an online cpu if the requested one is down. */
+	while (!cpu_online(goal_cpu)) {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	}
+
+	/* Encode the target cpu id into the IMAP TID field; the
+	 * encoding differs between Safari (cheetah), UPA and Starfire.
+	 */
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		tid = goal_cpu << 26;
+		tid &= IMAP_AID_SAFARI;
+	} else if (this_is_starfire == 0) {
+		tid = goal_cpu << 26;
+		tid &= IMAP_TID_UPA;
+	} else {
+		tid = (starfire_translate(imap, goal_cpu) << 26);
+		tid &= IMAP_TID_UPA;
+	}
+	upa_writel(tid | IMAP_VALID, imap);
+
+	/* BUG fix: step to the NEXT online cpu.  The old loop retested
+	 * the current (already online) cpu and never advanced, so every
+	 * source ended up targeted at the same cpu.
+	 */
+	do {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	} while (!cpu_online(goal_cpu));
+
+	return goal_cpu;
+}
+
+/* Called from request_irq. */
+/* Round-robin all registered interrupt sources over the online cpus
+ * using retarget_one_irq(); runs under irq_action_lock.
+ */
+static void distribute_irqs(void)
+{
+	unsigned long flags;
+	int cpu, level;
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+	cpu = 0;
+
+	/*
+	 * Skip the timer at [0], and very rare error/power intrs at [15].
+	 * Also level [12], it causes problems on Ex000 systems.
+	 */
+	for (level = 1; level < NR_IRQS; level++) {
+		struct irqaction *p = irq_action[level];
+		if (level == 12) continue;
+		while(p) {
+			cpu = retarget_one_irq(p, cpu);
+			p = p->next;
+		}
+	}
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+}
+#endif
+
+
+struct sun5_timer *prom_timers;
+static u64 prom_limit0, prom_limit1;
+
+/* Locate the PROM "/counter-timer" node and record its mapped address
+ * in prom_timers.  prom_timers is left NULL when the PROM does not use
+ * the counter-timer mechanism, and the other prom timer routines then
+ * become no-ops.
+ */
+static void map_prom_timers(void)
+{
+	unsigned int addr[3];
+	int node, plen;
+
+	/* PROM timer node hangs out in the top level of device siblings... */
+	node = prom_finddevice("/counter-timer");
+
+	/* Assume if node is not present, PROM uses different tick mechanism
+	 * which we should not care about.
+	 */
+	if (node == 0 || node == -1) {
+		prom_timers = NULL;
+		return;
+	}
+
+	/* If PROM is really using this, it must be mapped by him. */
+	plen = prom_getproperty(node, "address", (char *)addr, sizeof(addr));
+	if (plen == -1) {
+		prom_printf("PROM does not have timer mapped, trying to continue.\n");
+		prom_timers = NULL;
+		return;
+	}
+	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
+}
+
+/* Disable the PROM's counter-timer (saving its limits so
+ * enable_prom_timer() can restore them) and drain the interrupt
+ * packet it may have left pending.
+ */
+static void kill_prom_timer(void)
+{
+	if (!prom_timers)
+		return;
+
+	/* Save them away for later. */
+	prom_limit0 = prom_timers->limit0;
+	prom_limit1 = prom_timers->limit1;
+
+	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
+	 * We turn both off here just to be paranoid.
+	 */
+	prom_timers->limit0 = 0;
+	prom_timers->limit1 = 0;
+
+	/* Wheee, eat the interrupt packet too... */
+	__asm__ __volatile__(
+"	mov	0x40, %%g2\n"
+"	ldxa	[%%g0] %0, %%g1\n"
+"	ldxa	[%%g2] %1, %%g1\n"
+"	stxa	%%g0, [%%g0] %0\n"
+"	membar	#Sync\n"
+	: /* no outputs */
+	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
+	: "g1", "g2");
+}
+
+/* Re-arm the PROM counter-timer with the limits saved by
+ * kill_prom_timer(); no-op if no PROM timer was mapped.
+ */
+void enable_prom_timer(void)
+{
+	if (!prom_timers)
+		return;
+
+	/* Set it to whatever was there before. */
+	prom_timers->limit1 = prom_limit1;
+	prom_timers->count1 = 0;
+	prom_timers->limit0 = prom_limit0;
+	prom_timers->count0 = 0;
+}
+
+/* Zero this cpu's irq work area and point %g6 (in the interrupt
+ * global register set, PSTATE_IG) at it.  Must be called with
+ * PSTATE_IE off, which is verified below.
+ */
+void init_irqwork_curcpu(void)
+{
+	register struct irq_work_struct *workp asm("o2");
+	register unsigned long tmp asm("o3");
+	int cpu = hard_smp_processor_id();
+
+	memset(__irq_work + cpu, 0, sizeof(*workp));
+
+	/* Make sure we are called with PSTATE_IE disabled.  */
+	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
+			     : "=r" (tmp));
+	if (tmp & PSTATE_IE) {
+		prom_printf("BUG: init_irqwork_curcpu() called with "
+			    "PSTATE_IE enabled, bailing.\n");
+		__asm__ __volatile__("mov	%%i7, %0\n\t"
+				     : "=r" (tmp));
+		prom_printf("BUG: Called from %lx\n", tmp);
+		prom_halt();
+	}
+
+	/* Set interrupt globals.  */
+	workp = &__irq_work[cpu];
+	__asm__ __volatile__(
+	"rdpr	%%pstate, %0\n\t"
+	"wrpr	%0, %1, %%pstate\n\t"
+	"mov	%2, %%g6\n\t"
+	"wrpr	%0, 0x0, %%pstate\n\t"
+	: "=&r" (tmp)
+	: "i" (PSTATE_IG), "r" (workp));
+}
+
+/* Only invoked on boot processor. */
+void __init init_IRQ(void)
+{
+	map_prom_timers();
+	kill_prom_timer();
+	memset(&ivector_table[0], 0, sizeof(ivector_table));
+
+	/* We need to clear any IRQ's pending in the soft interrupt
+	 * registers, a spurious one could be left around from the
+	 * PROM timer which we just disabled.
+	 */
+	clear_softint(get_softint());
+
+	/* Now that ivector table is initialized, it is safe
+	 * to receive IRQ vector traps.  We will normally take
+	 * one or two right now, in case some device PROM used
+	 * to boot us wants to speak to us.  We just ignore them.
+	 */
+	/* Sets PSTATE_IE, enabling interrupts on this cpu. */
+	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
+			     "or	%%g1, %0, %%g1\n\t"
+			     "wrpr	%%g1, 0x0, %%pstate"
+			     : /* No outputs */
+			     : "i" (PSTATE_IE)
+			     : "g1");
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NUM_IVECS];
+
+#ifdef CONFIG_SMP
+
+/* /proc/irq/N/smp_affinity read handler: print the interrupt's cpu
+ * affinity mask, showing all online cpus when none was explicitly set.
+ */
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	struct ino_bucket *bp = ivector_table + (long)data;
+	struct irqaction *ap = bp->irq_info;
+	cpumask_t mask;
+	int len;
+
+	mask = get_smpaff_in_irqaction(ap);
+	if (cpus_empty(mask))
+		mask = cpu_online_map;
+
+	len = cpumask_scnprintf(page, count, mask);
+	/* Leave room for the trailing newline. */
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+/* Record a new affinity mask for 'irq'; the actual retargeting is
+ * performed lazily by redirect_intr() on the next service.
+ */
+static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
+{
+	struct ino_bucket *bp = ivector_table + irq;
+
+	/* Users specify affinity in terms of hw cpu ids.
+	 * As soon as we do this, handler_irq() might see and take action.
+	 */
+	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
+
+	/* Migration is simply done by the next cpu to service this
+	 * interrupt.
+	 */
+}
+
+/* /proc/irq/N/smp_affinity write handler: parse a cpu mask from
+ * userspace and retarget the interrupt.  Returns the full write count
+ * on success or a negative errno.
+ */
+static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
+					unsigned long count, void *data)
+{
+	int irq = (long) data, full_count = count, err;
+	cpumask_t new_value;
+
+	err = cpumask_parse(buffer, count, new_value);
+	/* BUG fix: the parse result used to be ignored, silently
+	 * accepting malformed input.
+	 */
+	if (err)
+		return err;
+
+	/*
+	 * Do not allow disabling IRQs completely - it's a too easy
+	 * way to make the system unusable accidentally :-) At least
+	 * one online CPU still has to be targeted.
+	 */
+	cpus_and(new_value, new_value, cpu_online_map);
+	if (cpus_empty(new_value))
+		return -EINVAL;
+
+	set_intr_affinity(irq, new_value);
+
+	return full_count;
+}
+
+#endif
+
+#define MAX_NAMELEN 10
+
+/* Create /proc/irq/<ino> and, on SMP non-starfire machines, its
+ * smp_affinity control file.  Does nothing when the entry already
+ * exists or /proc/irq has not been created yet.
+ */
+static void register_irq_proc (unsigned int irq)
+{
+	char name [MAX_NAMELEN];
+
+	if (!root_irq_dir || irq_dir[irq])
+		return;
+
+	memset(name, 0, MAX_NAMELEN);
+	sprintf(name, "%x", irq);
+
+	/* create /proc/irq/1234 */
+	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+	/* BUG fix: bail out if the directory could not be created,
+	 * instead of handing a NULL parent to create_proc_entry().
+	 */
+	if (!irq_dir[irq])
+		return;
+
+#ifdef CONFIG_SMP
+	/* XXX SMP affinity not supported on starfire yet. */
+	if (this_is_starfire == 0) {
+		struct proc_dir_entry *entry;
+
+		/* create /proc/irq/1234/smp_affinity */
+		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+		if (entry) {
+			entry->nlink = 1;
+			entry->data = (void *)(long)irq;
+			entry->read_proc = irq_affinity_read_proc;
+			entry->write_proc = irq_affinity_write_proc;
+		}
+	}
+#endif
+}
+
+/* Create the /proc/irq root; per-IRQ entries are added later by
+ * register_irq_proc() as interrupts are requested.
+ */
+void init_irq_proc (void)
+{
+	/* create /proc/irq */
+	root_irq_dir = proc_mkdir("irq", NULL);
+}
+
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
new file mode 100644
index 0000000..30862ab
--- /dev/null
+++ b/arch/sparc64/kernel/isa.c
@@ -0,0 +1,329 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <asm/oplib.h>
+#include <asm/isa.h>
+
+struct sparc_isa_bridge *isa_chain;
+
+/* Report an unrecoverable probe error; callers follow up with prom_halt(). */
+static void __init fatal_err(const char *reason)
+{
+	prom_printf("ISA: fatal error, %s.\n", reason);
+}
+
+/* Print a device name to the boot log: children appear as " (name)",
+ * top-level devices open a " [name" group which isa_fill_devices()
+ * later closes with "]".
+ */
+static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
+{
+	printk(child ? " (%s)" : " [%s", isa_dev->prom_name);
+}
+
+/* Read the device's first "reg" property entry, translate it into the
+ * parent PBM's I/O space, and claim it as an IORESOURCE_IO resource.
+ */
+static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev,
+					struct linux_prom_registers *pregs,
+					int pregs_size)
+{
+	unsigned long base, len;
+	int prop_len;
+
+	prop_len = prom_getproperty(isa_dev->prom_node, "reg",
+				    (char *) pregs, pregs_size);
+
+	/* No "reg" property: the device occupies no I/O space. */
+	if (prop_len <= 0)
+		return;
+
+	/* Only the first one is interesting. */
+	len = pregs[0].reg_size;
+	base = (((unsigned long)pregs[0].which_io << 32) |
+		(unsigned long)pregs[0].phys_addr);
+	base += isa_dev->bus->parent->io_space.start;
+
+	isa_dev->resource.start = base;
+	isa_dev->resource.end   = (base + len - 1UL);
+	isa_dev->resource.flags = IORESOURCE_IO;
+	isa_dev->resource.name  = isa_dev->prom_name;
+
+	/* NOTE(review): request_resource() failure is silently ignored. */
+	request_resource(&isa_dev->bus->parent->io_space,
+			 &isa_dev->resource);
+}
+
+/* I can't believe they didn't put a real INO in the isa device
+ * interrupts property.  The whole point of the OBP properties
+ * is to shield the kernel from IRQ routing details.
+ *
+ * The P1275 standard for ISA devices seems to also have been
+ * totally ignored.
+ *
+ * On later systems, an interrupt-map and interrupt-map-mask scheme
+ * akin to EBUS is used.
+ */
+/* OBP "interrupts" value -> PCI INO translation for pre-interrupt-map
+ * ("grover") systems; terminated by a zero obp_irq entry.
+ */
+static struct {
+	int	obp_irq;
+	int	pci_ino;
+} grover_irq_table[] = {
+	{ 1, 0x00 },	/* dma, unknown ino at this point */
+	{ 2, 0x27 },	/* floppy */
+	{ 3, 0x22 },	/* parallel */
+	{ 4, 0x2b },	/* serial */
+	{ 5, 0x25 },	/* acpi power management */
+
+	{ 0, 0x00 }	/* end of table */
+};
+
+/* Translate '*interrupt' through the bridge's interrupt-map (the EBUS
+ * style scheme).  On a match the child interrupt value is written back
+ * through 'interrupt' and 0 is returned; -1 means no entry matched.
+ */
+static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
+					     struct sparc_isa_bridge *isa_br,
+					     int *interrupt,
+					     struct linux_prom_registers *pregs)
+{
+	unsigned int masked_hi, masked_lo, masked_irq;
+	int ent;
+
+	/* Apply interrupt-map-mask to the unit address and interrupt
+	 * before comparing against the map entries.
+	 */
+	masked_hi = pregs->which_io & isa_br->isa_intmask.phys_hi;
+	masked_lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo;
+	masked_irq = *interrupt & isa_br->isa_intmask.interrupt;
+
+	for (ent = 0; ent < isa_br->num_isa_intmap; ent++) {
+		if (isa_br->isa_intmap[ent].phys_hi != masked_hi ||
+		    isa_br->isa_intmap[ent].phys_lo != masked_lo ||
+		    isa_br->isa_intmap[ent].interrupt != masked_irq)
+			continue;
+
+		*interrupt = isa_br->isa_intmap[ent].cinterrupt;
+		return 0;
+	}
+	return -1;
+}
+
+/* Resolve an ISA device's IRQ: try the bridge's interrupt-map first,
+ * then fall back to the hard-coded grover table; the resulting INO is
+ * turned into a real IRQ via the controller's irq_build().  Devices
+ * with no usable interrupt get PCI_IRQ_NONE.
+ */
+static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
+				   struct linux_prom_registers *pregs)
+{
+	int irq_prop;
+
+	irq_prop = prom_getintdefault(isa_dev->prom_node,
+				      "interrupts", -1);
+	if (irq_prop <= 0) {
+		goto no_irq;
+	} else {
+		struct pci_controller_info *pcic;
+		struct pci_pbm_info *pbm;
+		int i;
+
+		/* Newer systems: EBUS-style interrupt-map translation. */
+		if (isa_dev->bus->num_isa_intmap) {
+			if (!isa_dev_get_irq_using_imap(isa_dev,
+							isa_dev->bus,
+							&irq_prop,
+							pregs))
+				goto route_irq;
+		}
+
+		/* Older systems: fixed OBP-irq to INO table. */
+		for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
+			if (grover_irq_table[i].obp_irq == irq_prop) {
+				int ino = grover_irq_table[i].pci_ino;
+
+				if (ino == 0)
+					goto no_irq;
+ 
+				irq_prop = ino;
+				goto route_irq;
+			}
+		}
+		goto no_irq;
+
+route_irq:
+		pbm = isa_dev->bus->parent;
+		pcic = pbm->parent;
+		isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
+		return;
+	}
+
+no_irq:
+	isa_dev->irq = PCI_IRQ_NONE;
+}
+
+/* Probe the OBP children of 'parent_isa_dev', allocating a
+ * sparc_isa_device for each and linking it onto parent->child
+ * (prepended, so the list ends up in reverse OBP order).
+ */
+static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
+{
+	int node = prom_getchild(parent_isa_dev->prom_node);
+
+	if (node == 0)
+		return;
+
+	printk(" ->");
+	while (node != 0) {
+		struct linux_prom_registers regs[PROMREG_MAX];
+		struct sparc_isa_device *isa_dev;
+		int prop_len;
+
+		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
+		if (!isa_dev) {
+			fatal_err("cannot allocate child isa_dev");
+			prom_halt();
+		}
+
+		memset(isa_dev, 0, sizeof(*isa_dev));
+
+		/* Link it in to parent. */
+		isa_dev->next = parent_isa_dev->child;
+		parent_isa_dev->child = isa_dev;
+
+		isa_dev->bus = parent_isa_dev->bus;
+		isa_dev->prom_node = node;
+		prop_len = prom_getproperty(node, "name",
+					    (char *) isa_dev->prom_name,
+					    sizeof(isa_dev->prom_name));
+		if (prop_len <= 0) {
+			fatal_err("cannot get child isa_dev OBP node name");
+			prom_halt();
+		}
+
+		prop_len = prom_getproperty(node, "compatible",
+					    (char *) isa_dev->compatible,
+					    sizeof(isa_dev->compatible));
+
+		/* Not having this is OK. */
+		if (prop_len <= 0)
+			isa_dev->compatible[0] = '\0';
+
+		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
+		isa_dev_get_irq(isa_dev, regs);
+
+		report_dev(isa_dev, 1);
+
+		node = prom_getsibling(node);
+	}
+}
+
+/* Enumerate all OBP children of the ISA bridge, allocating a
+ * sparc_isa_device for each, appending it to isa_br->devices (tail
+ * insertion preserves OBP order), resolving its resources and IRQ,
+ * and recursing into its own children.
+ */
+static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
+{
+	int node = prom_getchild(isa_br->prom_node);
+
+	while (node != 0) {
+		struct linux_prom_registers regs[PROMREG_MAX];
+		struct sparc_isa_device *isa_dev;
+		int prop_len;
+
+		isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
+		if (!isa_dev) {
+			fatal_err("cannot allocate isa_dev");
+			prom_halt();
+		}
+
+		memset(isa_dev, 0, sizeof(*isa_dev));
+
+		/* Link it in. */
+		isa_dev->next = NULL;
+		if (isa_br->devices == NULL) {
+			isa_br->devices = isa_dev;
+		} else {
+			struct sparc_isa_device *tmp = isa_br->devices;
+
+			while (tmp->next)
+				tmp = tmp->next;
+
+			tmp->next = isa_dev;
+		}
+
+		isa_dev->bus = isa_br;
+		isa_dev->prom_node = node;
+		prop_len = prom_getproperty(node, "name",
+					    (char *) isa_dev->prom_name,
+					    sizeof(isa_dev->prom_name));
+		if (prop_len <= 0) {
+			fatal_err("cannot get isa_dev OBP node name");
+			prom_halt();
+		}
+
+		prop_len = prom_getproperty(node, "compatible",
+					    (char *) isa_dev->compatible,
+					    sizeof(isa_dev->compatible));
+
+		/* Not having this is OK. */
+		if (prop_len <= 0)
+			isa_dev->compatible[0] = '\0';
+
+		isa_dev_get_resource(isa_dev, regs, sizeof(regs));
+		isa_dev_get_irq(isa_dev, regs);
+
+		report_dev(isa_dev, 0);
+
+		isa_fill_children(isa_dev);
+
+		/* Closes the " [name" group opened by report_dev(). */
+		printk("]");
+
+		node = prom_getsibling(node);
+	}
+}
+
+/* Scan for ALi M1533 PCI-to-ISA bridges, build a sparc_isa_bridge for
+ * each (chained onto isa_chain), read its OBP ranges/interrupt-map
+ * properties, and probe all child ISA devices.
+ */
+void __init isa_init(void)
+{
+	struct pci_dev *pdev;
+	unsigned short vendor, device;
+	int index = 0;
+
+	vendor = PCI_VENDOR_ID_AL;
+	device = PCI_DEVICE_ID_AL_M1533;
+
+	pdev = NULL;
+	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
+		struct pcidev_cookie *pdev_cookie;
+		struct pci_pbm_info *pbm;
+		struct sparc_isa_bridge *isa_br;
+		int prop_len;
+
+		pdev_cookie = pdev->sysdata;
+		if (!pdev_cookie) {
+			printk("ISA: Warning, ISA bridge ignored due to "
+			       "lack of OBP data.\n");
+			continue;
+		}
+		pbm = pdev_cookie->pbm;
+
+		isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL);
+		if (!isa_br) {
+			fatal_err("cannot allocate sparc_isa_bridge");
+			prom_halt();
+		}
+
+		memset(isa_br, 0, sizeof(*isa_br));
+
+		/* Link it in. */
+		isa_br->next = isa_chain;
+		isa_chain = isa_br;
+
+		isa_br->parent = pbm;
+		isa_br->self = pdev;
+		isa_br->index = index++;
+		isa_br->prom_node = pdev_cookie->prom_node;
+		/* NOTE(review): strncpy leaves prom_name unterminated when
+		 * the source fills the buffer -- assumes the OBP name is
+		 * shorter than the destination; verify.
+		 */
+		strncpy(isa_br->prom_name, pdev_cookie->prom_name,
+			sizeof(isa_br->prom_name));
+
+		prop_len = prom_getproperty(isa_br->prom_node,
+					    "ranges",
+					    (char *) isa_br->isa_ranges,
+					    sizeof(isa_br->isa_ranges));
+		if (prop_len <= 0)
+			isa_br->num_isa_ranges = 0;
+		else
+			isa_br->num_isa_ranges =
+				(prop_len / sizeof(struct linux_prom_isa_ranges));
+
+		prop_len = prom_getproperty(isa_br->prom_node,
+					    "interrupt-map",
+					    (char *) isa_br->isa_intmap,
+					    sizeof(isa_br->isa_intmap));
+		if (prop_len <= 0)
+			isa_br->num_isa_intmap = 0;
+		else
+			isa_br->num_isa_intmap =
+				(prop_len / sizeof(struct linux_prom_isa_intmap));
+
+		prop_len = prom_getproperty(isa_br->prom_node,
+					    "interrupt-map-mask",
+					    (char *) &(isa_br->isa_intmask),
+					    sizeof(isa_br->isa_intmask));
+
+		printk("isa%d:", isa_br->index);
+
+		isa_fill_devices(isa_br);
+
+		printk("\n");
+	}
+}
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
new file mode 100644
index 0000000..b5e32df
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -0,0 +1,83 @@
+/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
+ * itlb_base.S:	Front end to ITLB miss replacement strategy.
+ *              This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#if PAGE_SHIFT == 13
+/*
+ * To compute vpte offset, we need to do ((addr >> 13) << 3),
+ * which can be optimized to (addr >> 10) if bits 10/11/12 can
+ * be guaranteed to be 0 ... mmu_context.h does guarantee this
+ * by only using 10 bits in the hwcontext value.
+ */
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+				srax	r1, 10, r2
+#define CREATE_VPTE_OFFSET2(r1, r2)
+#define CREATE_VPTE_NOP		nop
+#else /* PAGE_SHIFT */
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+				srax	r1, PAGE_SHIFT, r2
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+				sllx	r2, 3, r2
+#define CREATE_VPTE_NOP
+#endif /* PAGE_SHIFT */
+
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus instruction misses from module code.
+ * 2) All user instruction misses.
+ *
+ * All real page faults merge their code paths to the
+ * sparc64_realfault_common label below.
+ */
+
+/* NOTE(review): this code is included directly into the trap table;
+ * total size and intra-vector offsets (e.g. the 0x7c used by
+ * winfix_trampoline) are layout-sensitive -- re-verify both whenever
+ * instructions are added, removed or moved.
+ */
+
+/* ITLB ** ICACHE line 1: Quick user TLB misses		*/
+	ldxa		[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
+	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
+	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
+	ldxa		[%g3 + %g6] ASI_P, %g5		! Load VPTE
+1:	brgez,pn	%g5, 3f				! Not valid, branch out
+	 sethi		%hi(_PAGE_EXEC), %g4		! Delay-slot
+	andcc		%g5, %g4, %g0			! Executable?
+	be,pn		%xcc, 3f			! Nope, branch.
+	 nop						! Delay-slot
+2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
+	retry						! Trap return
+3:	rdpr		%pstate, %g4			! Move into alternate globals
+
+/* ITLB ** ICACHE line 2: Real faults			*/
+	wrpr		%g4, PSTATE_AG|PSTATE_MG, %pstate
+	rdpr		%tpc, %g5			! And load faulting VA
+	mov		FAULT_CODE_ITLB, %g4		! It was read from ITLB
+sparc64_realfault_common:				! Called by TL0 dtlb_miss too
+	stb		%g4, [%g6 + TI_FAULT_CODE]
+	stx		%g5, [%g6 + TI_FAULT_ADDR]
+	ba,pt		%xcc, etrap			! Save state
+1:	 rd		%pc, %g7			! ...
+	nop
+
+/* ITLB ** ICACHE line 3: Finish faults + window fixups	*/
+	call		do_sparc64_fault		! Call fault handler
+	 add		%sp, PTREGS_OFF, %o0! Compute pt_regs arg
+	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
+	 nop
+winfix_trampoline:
+	rdpr		%tpc, %g3			! Prepare winfixup TNPC
+	or		%g3, 0x7c, %g3			! Compute offset to branch
+	wrpr		%g3, %tnpc			! Write it into TNPC
+	done						! Do it to it
+
+/* ITLB ** ICACHE line 4: Unused...	*/
+	nop
+	nop
+	nop
+	nop
+	CREATE_VPTE_NOP
+
+#undef CREATE_VPTE_OFFSET1
+#undef CREATE_VPTE_OFFSET2
+#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
new file mode 100644
index 0000000..7066d7b
--- /dev/null
+++ b/arch/sparc64/kernel/kprobes.c
@@ -0,0 +1,394 @@
+/* arch/sparc64/kernel/kprobes.c
+ *
+ * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include <asm/kdebug.h>
+#include <asm/signal.h>
+
+/* We do not have hardware single-stepping on sparc64.
+ * So we implement software single-stepping with breakpoint
+ * traps.  The top-level scheme is similar to that used
+ * in the x86 kprobes implementation.
+ *
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break instruction at
+ * index one.
+ *
+ * When we hit a kprobe we:
+ * - Run the pre-handler
+ * - Remember "regs->tnpc" and interrupt level stored in
+ *   "regs->tstate" so we can restore them later
+ * - Disable PIL interrupts
+ * - Set regs->tpc to point to kprobe->ainsn.insn[0]
+ * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
+ * - Mark that we are actively in a kprobe
+ *
+ * At this point we wait for the second breakpoint at
+ * kprobe->ainsn.insn[1] to hit.  When it does we:
+ * - Run the post-handler
+ * - Set regs->tpc to "remembered" regs->tnpc stored above,
+ *   restore the PIL interrupt level in "regs->tstate" as well
+ * - Make any adjustments necessary to regs->tnpc in order
+ *   to handle relative branches correctly.  See below.
+ * - Mark that we are no longer actively in a kprobe.
+ */
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+	return 0;
+}
+
+void arch_copy_kprobe(struct kprobe *p)
+{
+	p->ainsn.insn[0] = *p->addr;
+	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE	0x00000001
+#define KPROBE_HIT_SS		0x00000002
+
+static struct kprobe *current_kprobe;
+static unsigned long current_kprobe_orig_tnpc;
+static unsigned long current_kprobe_orig_tstate_pil;
+static unsigned int kprobe_status;
+
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	current_kprobe_orig_tnpc = regs->tnpc;
+	current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
+	regs->tstate |= TSTATE_PIL;
+
+	/* Single-step inline if it is a breakpoint instruction. */
+	if (p->opcode == BREAKPOINT_INSTRUCTION) {
+		regs->tpc = (unsigned long) p->addr;
+		regs->tnpc = current_kprobe_orig_tnpc;
+	} else {
+		regs->tpc = (unsigned long) &p->ainsn.insn[0];
+		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
+	}
+}
+
+static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	*p->addr = p->opcode;
+	flushi(p->addr);
+
+	regs->tpc = (unsigned long) p->addr;
+	regs->tnpc = current_kprobe_orig_tnpc;
+	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+			current_kprobe_orig_tstate_pil);
+}
+
+static int kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	void *addr = (void *) regs->tpc;
+	int ret = 0;
+
+	preempt_disable();
+
+	if (kprobe_running()) {
+		/* We *are* holding lock here, so this is safe.
+		 * Disarm the probe we just hit, and ignore it.
+		 */
+		p = get_kprobe(addr);
+		if (p) {
+			if (kprobe_status == KPROBE_HIT_SS) {
+				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+					current_kprobe_orig_tstate_pil);
+				unlock_kprobes();
+				goto no_kprobe;
+			}
+			disarm_kprobe(p, regs);
+			ret = 1;
+		} else {
+			p = current_kprobe;
+			if (p->break_handler && p->break_handler(p, regs))
+				goto ss_probe;
+		}
+		/* If it's not ours, can't be delete race, (we hold lock). */
+		goto no_kprobe;
+	}
+
+	lock_kprobes();
+	p = get_kprobe(addr);
+	if (!p) {
+		unlock_kprobes();
+		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it.  Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address.  In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	kprobe_status = KPROBE_HIT_ACTIVE;
+	current_kprobe = p;
+	if (p->pre_handler && p->pre_handler(p, regs))
+		return 1;
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+}
+
+/* If INSN is a relative control transfer instruction,
+ * return the corrected branch destination value.
+ *
+ * The original INSN location was REAL_PC, it actually
+ * executed at PC and produced destination address NPC.
+ */
+static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
+				     unsigned long pc, unsigned long npc)
+{
+	/* Branch not taken, no mods necessary.  */
+	if (npc == pc + 0x4UL)
+		return real_pc + 0x4UL;
+
+	/* The three cases are call, branch w/prediction,
+	 * and traditional branch.
+	 */
+	if ((insn & 0xc0000000) == 0x40000000 ||
+	    (insn & 0xc1c00000) == 0x00400000 ||
+	    (insn & 0xc1c00000) == 0x00800000) {
+		/* The instruction did all the work for us
+		 * already, just apply the offset to the correct
+		 * instruction location.
+		 */
+		return (real_pc + (npc - pc));
+	}
+
+	return real_pc + 0x4UL;
+}
+
+/* If INSN is an instruction which writes its PC location
+ * into a destination register, fix that up.
+ */
+static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
+{
+	unsigned long *slot = NULL;
+
+	/* Simplest case is call, which always uses %o7 */
+	if ((insn & 0xc0000000) == 0x40000000) {
+		slot = &regs->u_regs[UREG_I7];
+	}
+
+	/* Jmpl encodes the register inside of the opcode */
+	if ((insn & 0xc1f80000) == 0x81c00000) {
+		unsigned long rd = ((insn >> 25) & 0x1f);
+
+		if (rd <= 15) {
+			slot = &regs->u_regs[rd];
+		} else {
+			/* Hard case, it goes onto the stack. */
+			flushw_all();
+
+			rd -= 16;
+			slot = (unsigned long *)
+				(regs->u_regs[UREG_FP] + STACK_BIAS);
+			slot += rd;
+		}
+	}
+	if (slot != NULL)
+		*slot = real_pc;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the breakpoint
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+	u32 insn = p->ainsn.insn[0];
+
+	regs->tpc = current_kprobe_orig_tnpc;
+	regs->tnpc = relbranch_fixup(insn,
+				     (unsigned long) p->addr,
+				     (unsigned long) &p->ainsn.insn[0],
+				     regs->tnpc);
+	retpc_fixup(regs, insn, (unsigned long) p->addr);
+
+	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+			current_kprobe_orig_tstate_pil);
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+	if (!kprobe_running())
+		return 0;
+
+	if (current_kprobe->post_handler)
+		current_kprobe->post_handler(current_kprobe, regs, 0);
+
+	resume_execution(current_kprobe, regs);
+
+	unlock_kprobes();
+	preempt_enable_no_resched();
+
+	return 1;
+}
+
+/* Interrupts disabled, kprobe_lock held. */
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	if (current_kprobe->fault_handler
+	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+		return 1;
+
+	if (kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(current_kprobe, regs);
+
+		unlock_kprobes();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+			     void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+	switch (val) {
+	case DIE_DEBUG:
+		if (kprobe_handler(args->regs))
+			return NOTIFY_STOP;
+		break;
+	case DIE_DEBUG_2:
+		if (post_kprobe_handler(args->regs))
+			return NOTIFY_STOP;
+		break;
+	case DIE_GPF:
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			return NOTIFY_STOP;
+		break;
+	case DIE_PAGE_FAULT:
+		if (kprobe_running() &&
+		    kprobe_fault_handler(args->regs, args->trapnr))
+			return NOTIFY_STOP;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
+{
+	BUG_ON(trap_level != 0x170 && trap_level != 0x171);
+
+	if (user_mode(regs)) {
+		local_irq_enable();
+		bad_trap(regs, trap_level);
+		return;
+	}
+
+	/* trap_level == 0x170 --> ta 0x70
+	 * trap_level == 0x171 --> ta 0x71
+	 */
+	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
+		       (trap_level == 0x170) ? "debug" : "debug_2",
+		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
+		bad_trap(regs, trap_level);
+}
+
+/* Jprobes support.  */
+static struct pt_regs jprobe_saved_regs;
+static struct pt_regs *jprobe_saved_regs_location;
+static struct sparc_stackf jprobe_saved_stack;
+
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+	jprobe_saved_regs_location = regs;
+	memcpy(&jprobe_saved_regs, regs, sizeof(*regs));
+
+	/* Save a whole stack frame, this gets arguments
+	 * pushed onto the stack after using up all the
+	 * arg registers.
+	 */
+	memcpy(&jprobe_saved_stack,
+	       (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
+	       sizeof(jprobe_saved_stack));
+
+	regs->tpc  = (unsigned long) jp->entry;
+	regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
+	regs->tstate |= TSTATE_PIL;
+
+	return 1;
+}
+
+void jprobe_return(void)
+{
+	preempt_enable_no_resched();
+	__asm__ __volatile__(
+		".globl	jprobe_return_trap_instruction\n"
+"jprobe_return_trap_instruction:\n\t"
+		"ta 0x70");
+}
+
+extern void jprobe_return_trap_instruction(void);
+
+extern void __show_regs(struct pt_regs * regs);
+
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	u32 *addr = (u32 *) regs->tpc;
+
+	if (addr == (u32 *) jprobe_return_trap_instruction) {
+		if (jprobe_saved_regs_location != regs) {
+			printk("JPROBE: Current regs (%p) does not match "
+			       "saved regs (%p).\n",
+			       regs, jprobe_saved_regs_location);
+			printk("JPROBE: Saved registers\n");
+			__show_regs(jprobe_saved_regs_location);
+			printk("JPROBE: Current registers\n");
+			__show_regs(regs);
+			BUG();
+		}
+		/* Restore old register state.  Do pt_regs
+		 * first so that UREG_FP is the original one for
+		 * the stack frame restore.
+		 */
+		memcpy(regs, &jprobe_saved_regs, sizeof(*regs));
+
+		memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
+		       &jprobe_saved_stack,
+		       sizeof(jprobe_saved_stack));
+
+		return 1;
+	}
+	return 0;
+}
diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c
new file mode 100644
index 0000000..6c83e37
--- /dev/null
+++ b/arch/sparc64/kernel/module.c
@@ -0,0 +1,209 @@
+/* Kernel module help for sparc64.
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2002 David S. Miller.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/spitfire.h>
+
+static void *module_map(unsigned long size)
+{
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+	if (!size || size > MODULES_LEN)
+		return NULL;
+
+	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
+	if (!area)
+		return NULL;
+
+	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+}
+
+void *module_alloc(unsigned long size)
+{
+	void *ret;
+
+	/* We handle the zero case fine, unlike vmalloc */
+	if (size == 0)
+		return NULL;
+
+	ret = module_map(size);
+	if (!ret)
+		ret = ERR_PTR(-ENOMEM);
+	else
+		memset(ret, 0, size);
+
+	return ret;
+}
+
+/* Free memory returned from module_core_alloc/module_init_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+           table entries. */
+}
+
+/* Make generic code ignore STT_REGISTER dummy undefined symbols.  */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+			      Elf_Shdr *sechdrs,
+			      char *secstrings,
+			      struct module *mod)
+{
+	unsigned int symidx;
+	Elf64_Sym *sym;
+	const char *strtab;
+	int i;
+
+	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
+		if (symidx == hdr->e_shnum-1) {
+			printk("%s: no symtab found.\n", mod->name);
+			return -ENOEXEC;
+		}
+	}
+	sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
+	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
+
+	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
+		if (sym[i].st_shndx == SHN_UNDEF &&
+		    ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
+			sym[i].st_shndx = SHN_ABS;
+	}
+	return 0;
+}
+
+int apply_relocate(Elf64_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
+	       me->name);
+	return -ENOEXEC;
+}
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	unsigned int i;
+	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf64_Sym *sym;
+	u8 *location;
+	u32 *loc32;
+
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		Elf64_Addr v;
+
+		/* This is where to make the change */
+		location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		loc32 = (u32 *) location;
+
+		BUG_ON(((u64)location >> (u64)32) != (u64)0);
+
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+			+ ELF64_R_SYM(rel[i].r_info);
+		v = sym->st_value + rel[i].r_addend;
+
+		switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
+		case R_SPARC_64:
+			location[0] = v >> 56;
+			location[1] = v >> 48;
+			location[2] = v >> 40;
+			location[3] = v >> 32;
+			location[4] = v >> 24;
+			location[5] = v >> 16;
+			location[6] = v >>  8;
+			location[7] = v >>  0;
+			break;
+
+		case R_SPARC_32:
+			location[0] = v >> 24;
+			location[1] = v >> 16;
+			location[2] = v >>  8;
+			location[3] = v >>  0;
+			break;
+
+		case R_SPARC_WDISP30:
+			v -= (Elf64_Addr) location;
+			*loc32 = (*loc32 & ~0x3fffffff) |
+				((v >> 2) & 0x3fffffff);
+			break;
+
+		case R_SPARC_WDISP22:
+			v -= (Elf64_Addr) location;
+			*loc32 = (*loc32 & ~0x3fffff) |
+				((v >> 2) & 0x3fffff);
+			break;
+
+		case R_SPARC_WDISP19:
+			v -= (Elf64_Addr) location;
+			*loc32 = (*loc32 & ~0x7ffff) |
+				((v >> 2) & 0x7ffff);
+			break;
+
+		case R_SPARC_LO10:
+			*loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
+			break;
+
+		case R_SPARC_HI22:
+			*loc32 = (*loc32 & ~0x3fffff) |
+				((v >> 10) & 0x3fffff);
+			break;
+
+		case R_SPARC_OLO10:
+			*loc32 = (*loc32 & ~0x1fff) |
+				(((v & 0x3ff) +
+				  (ELF64_R_TYPE(rel[i].r_info) >> 8))
+				 & 0x1fff);
+			break;
+
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %x\n",
+			       me->name,
+			       (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
+			return -ENOEXEC;
+		};
+	}
+	return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	/* Cheetah's I-cache is fully coherent.  */
+	if (tlb_type == spitfire) {
+		unsigned long va;
+
+		flushw_all();
+		for (va =  0; va < (PAGE_SIZE << 1); va += 32)
+			spitfire_put_icache_tag(va, 0x0);
+		__asm__ __volatile__("flush %g6");
+	}
+
+	return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
new file mode 100644
index 0000000..bba140d
--- /dev/null
+++ b/arch/sparc64/kernel/pci.c
@@ -0,0 +1,805 @@
+/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
+ * pci.c: UltraSparc PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pbm.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/ebus.h>
+#include <asm/isa.h>
+
+unsigned long pci_memspace_mask = 0xffffffffUL;
+
+#ifndef CONFIG_PCI
+/* A "nop" PCI implementation. */
+asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+				  unsigned long off, unsigned long len,
+				  unsigned char *buf)
+{
+	return 0;
+}
+asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+				   unsigned long off, unsigned long len,
+				   unsigned char *buf)
+{
+	return 0;
+}
+#else
+
+/* List of all PCI controllers found in the system. */
+struct pci_controller_info *pci_controller_root = NULL;
+
+/* Each PCI controller found gets a unique index. */
+int pci_num_controllers = 0;
+
+/* At boot time the user can give the kernel a command
+ * line option which controls if and how PCI devices
+ * are reordered at PCI bus probing time.
+ */
+int pci_device_reorder = 0;
+
+volatile int pci_poke_in_progress;
+volatile int pci_poke_cpu = -1;
+volatile int pci_poke_faulted;
+
+static DEFINE_SPINLOCK(pci_poke_lock);
+
+void pci_config_read8(u8 *addr, u8 *ret)
+{
+	unsigned long flags;
+	u8 byte;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduba [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (byte)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = byte;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_read16(u16 *addr, u16 *ret)
+{
+	unsigned long flags;
+	u16 word;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduha [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (word)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = word;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_read32(u32 *addr, u32 *ret)
+{
+	unsigned long flags;
+	u32 dword;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduwa [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (dword)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = dword;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write8(u8 *addr, u8 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stba %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write16(u16 *addr, u16 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stha %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+void pci_config_write32(u32 *addr, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stwa %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* Probe for all PCI controllers in the system. */
+extern void sabre_init(int, char *);
+extern void psycho_init(int, char *);
+extern void schizo_init(int, char *);
+extern void schizo_plus_init(int, char *);
+extern void tomatillo_init(int, char *);
+
+static struct {
+	char *model_name;
+	void (*init)(int, char *);
+} pci_controller_table[] __initdata = {
+	{ "SUNW,sabre", sabre_init },
+	{ "pci108e,a000", sabre_init },
+	{ "pci108e,a001", sabre_init },
+	{ "SUNW,psycho", psycho_init },
+	{ "pci108e,8000", psycho_init },
+	{ "SUNW,schizo", schizo_init },
+	{ "pci108e,8001", schizo_init },
+	{ "SUNW,schizo+", schizo_plus_init },
+	{ "pci108e,8002", schizo_plus_init },
+	{ "SUNW,tomatillo", tomatillo_init },
+	{ "pci108e,a801", tomatillo_init },
+};
+#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
+				  sizeof(pci_controller_table[0]))
+
+static int __init pci_controller_init(char *model_name, int namelen, int node)
+{
+	int i;
+
+	for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
+		if (!strncmp(model_name,
+			     pci_controller_table[i].model_name,
+			     namelen)) {
+			pci_controller_table[i].init(node, model_name);
+			return 1;
+		}
+	}
+	printk("PCI: Warning unknown controller, model name [%s]\n",
+	       model_name);
+	printk("PCI: Ignoring controller...\n");
+
+	return 0;
+}
+
+static int __init pci_is_controller(char *model_name, int namelen, int node)
+{
+	int i;
+
+	for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
+		if (!strncmp(model_name,
+			     pci_controller_table[i].model_name,
+			     namelen)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int __init pci_controller_scan(int (*handler)(char *, int, int))
+{
+	char namebuf[64];
+	int node;
+	int count = 0;
+
+	node = prom_getchild(prom_root_node);
+	while ((node = prom_searchsiblings(node, "pci")) != 0) {
+		int len;
+
+		if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
+		    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
+			int item_len = 0;
+
+			/* Our value may be a multi-valued string in the
+			 * case of some compatible properties. For sanity,
+			 * only try the first one. */
+
+			while (namebuf[item_len] && len) {
+				len--;
+				item_len++;
+			}
+
+			if (handler(namebuf, item_len, node))
+				count++;
+		}
+
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+
+	return count;
+}
+
+
+/* Is there some PCI controller in the system?  */
+int __init pcic_present(void)
+{
+	return pci_controller_scan(pci_is_controller);
+}
+
+/* Find each controller in the system, attach and initialize
+ * software state structure for each and link into the
+ * pci_controller_root.  Setup the controller enough such
+ * that bus scanning can be done.
+ */
+static void __init pci_controller_probe(void)
+{
+	printk("PCI: Probing for controllers.\n");
+
+	pci_controller_scan(pci_controller_init);
+}
+
+static void __init pci_scan_each_controller_bus(void)
+{
+	struct pci_controller_info *p;
+
+	for (p = pci_controller_root; p; p = p->next)
+		p->scan_bus(p);
+}
+
+/* Reorder the pci_dev chain, so that onboard devices come first
+ * and then come the pluggable cards.
+ */
+static void __init pci_reorder_devs(void)
+{
+	struct list_head *pci_onboard = &pci_devices;
+	struct list_head *walk = pci_onboard->next;
+
+	while (walk != pci_onboard) {
+		struct pci_dev *pdev = pci_dev_g(walk);
+		struct list_head *walk_next = walk->next;
+
+		if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
+			list_del(walk);
+			list_add(walk, pci_onboard);
+		}
+
+		walk = walk_next;
+	}
+}
+
+extern void clock_probe(void);
+extern void power_init(void);
+
+static int __init pcibios_init(void)
+{
+	pci_controller_probe();
+	if (pci_controller_root == NULL)
+		return 0;
+
+	pci_scan_each_controller_bus();
+
+	if (pci_device_reorder)
+		pci_reorder_devs();
+
+	isa_init();
+	ebus_init();
+	clock_probe();
+	power_init();
+
+	return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+void pcibios_fixup_bus(struct pci_bus *pbus)
+{
+	struct pci_pbm_info *pbm = pbus->sysdata;
+
+	/* Generic PCI bus probing sets these to point at
+	 * &io{port,mem}_resource which is wrong for us.
+	 */
+	pbus->resource[0] = &pbm->io_space;
+	pbus->resource[1] = &pbm->mem_space;
+}
+
+int pci_claim_resource(struct pci_dev *pdev, int resource)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource *res = &pdev->resource[resource];
+	struct resource *root;
+
+	if (!pbm)
+		return -EINVAL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	pbm->parent->resource_adjust(pdev, res, root);
+
+	return request_resource(root, res);
+}
+
+/*
+ * Given the PCI bus a device resides on, try to
+ * find an acceptable resource allocation for a
+ * specific device resource..
+ */
+static int pci_assign_bus_resource(const struct pci_bus *bus,
+	struct pci_dev *dev,
+	struct resource *res,
+	unsigned long size,
+	unsigned long min,
+	int resno)
+{
+	unsigned int type_mask;
+	int i;
+
+	type_mask = IORESOURCE_IO | IORESOURCE_MEM;
+	for (i = 0 ; i < 4; i++) {
+		struct resource *r = bus->resource[i];
+		if (!r)
+			continue;
+
+		/* type_mask must match */
+		if ((res->flags ^ r->flags) & type_mask)
+			continue;
+
+		/* Ok, try it out.. */
+		if (allocate_resource(r, res, size, min, -1, size, NULL, NULL) < 0)
+			continue;
+
+		/* PCI config space updated by caller.  */
+		return 0;
+	}
+	return -EBUSY;
+}
+
+int pci_assign_resource(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res = &pdev->resource[resource];
+	unsigned long min, size;
+	int err;
+
+	if (res->flags & IORESOURCE_IO)
+		min = pbm->io_space.start + 0x400UL;
+	else
+		min = pbm->mem_space.start;
+
+	size = res->end - res->start + 1;
+
+	err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);
+
+	if (err < 0) {
+		printk("PCI: Failed to allocate resource %d for %s\n",
+		       resource, pci_name(pdev));
+	} else {
+		/* Update PCI config space. */
+		pbm->parent->base_address_update(pdev, resource);
+	}
+
+	return err;
+}
+
+/* Sort resources by alignment */
+void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
+{
+	int i;
+
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		struct resource *r;
+		struct resource_list *list, *tmp;
+		unsigned long r_align;
+
+		r = &dev->resource[i];
+		r_align = r->end - r->start;
+		
+		if (!(r->flags) || r->parent)
+			continue;
+		if (!r_align) {
+			printk(KERN_WARNING "PCI: Ignore bogus resource %d "
+					    "[%lx:%lx] of %s\n",
+					    i, r->start, r->end, pci_name(dev));
+			continue;
+		}
+		r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
+		for (list = head; ; list = list->next) {
+			unsigned long align = 0;
+			struct resource_list *ln = list->next;
+			int idx;
+
+			if (ln) {
+				idx = ln->res - &ln->dev->resource[0];
+				align = (idx < PCI_BRIDGE_RESOURCES) ?
+					ln->res->end - ln->res->start + 1 :
+					ln->res->start;
+			}
+			if (r_align > align) {
+				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+				if (!tmp)
+					panic("pdev_sort_resources(): "
+					      "kmalloc() failed!\n");
+				tmp->next = ln;
+				tmp->res = r;
+				tmp->dev = dev;
+				list->next = tmp;
+				break;
+			}
+		}
+	}
+}
+
+void pcibios_update_irq(struct pci_dev *pdev, int irq)
+{
+}
+
+void pcibios_align_resource(void *data, struct resource *res,
+			    unsigned long size, unsigned long align)
+{
+}
+
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+	return 0;
+}
+
+void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
+			     struct resource *res)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource zero_res, *root;
+
+	zero_res.start = 0;
+	zero_res.end = 0;
+	zero_res.flags = res->flags;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	pbm->parent->resource_adjust(pdev, &zero_res, root);
+
+	region->start = res->start - zero_res.start;
+	region->end = res->end - zero_res.start;
+}
+
+void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
+			     struct pci_bus_region *region)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource *root;
+
+	res->start = region->start;
+	res->end = region->end;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	pbm->parent->resource_adjust(pdev, res, root);
+}
+
+char * __init pcibios_setup(char *str)
+{
+	if (!strcmp(str, "onboardfirst")) {
+		pci_device_reorder = 1;
+		return NULL;
+	}
+	if (!strcmp(str, "noreorder")) {
+		pci_device_reorder = 0;
+		return NULL;
+	}
+	return str;
+}
+
+/* Platform support for /proc/bus/pci/X/Y mmap()s. */
+
+/* If the user uses a host-bridge as the PCI device, he may use
+ * this to perform a raw mmap() of the I/O or MEM space behind
+ * that controller.
+ *
+ * This can be useful for execution of x86 PCI bios initialization code
+ * on a PCI card, like the xfree86 int10 stuff does.
+ */
+/* Compute vma->vm_pgoff for a mmap of an entire PCI bus space,
+ * i.e. when PDEV is the host bridge itself.  The user supplied
+ * mmap offset is relative to the start of the bus I/O or MEM
+ * space; translate it into an absolute physical page offset.
+ *
+ * Returns zero on success, -ENXIO when no cookie/PBM is attached,
+ * -EINVAL when the requested range falls outside the space.
+ */
+static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm;
+	struct pci_controller_info *p;
+	unsigned long space_size, user_offset, user_size;
+
+	if (!pcp)
+		return -ENXIO;
+	pbm = pcp->pbm;
+	if (!pbm)
+		return -ENXIO;
+
+	p = pbm->parent;
+	if (p->pbms_same_domain) {
+		unsigned long lowest, highest;
+
+		/* Both PBMs live in one domain: the mappable window
+		 * spans the union of the PBM A and PBM B ranges (a
+		 * zero 'flags' marks an unused range).
+		 */
+		lowest = ~0UL; highest = 0UL;
+		if (mmap_state == pci_mmap_io) {
+			if (p->pbm_A.io_space.flags) {
+				lowest = p->pbm_A.io_space.start;
+				highest = p->pbm_A.io_space.end + 1;
+			}
+			if (p->pbm_B.io_space.flags) {
+				if (lowest > p->pbm_B.io_space.start)
+					lowest = p->pbm_B.io_space.start;
+				if (highest < p->pbm_B.io_space.end + 1)
+					highest = p->pbm_B.io_space.end + 1;
+			}
+			space_size = highest - lowest;
+		} else {
+			if (p->pbm_A.mem_space.flags) {
+				lowest = p->pbm_A.mem_space.start;
+				highest = p->pbm_A.mem_space.end + 1;
+			}
+			if (p->pbm_B.mem_space.flags) {
+				if (lowest > p->pbm_B.mem_space.start)
+					lowest = p->pbm_B.mem_space.start;
+				if (highest < p->pbm_B.mem_space.end + 1)
+					highest = p->pbm_B.mem_space.end + 1;
+			}
+			space_size = highest - lowest;
+		}
+	} else {
+		if (mmap_state == pci_mmap_io) {
+			space_size = (pbm->io_space.end -
+				      pbm->io_space.start) + 1;
+		} else {
+			space_size = (pbm->mem_space.end -
+				      pbm->mem_space.start) + 1;
+		}
+	}
+
+	/* Make sure the request is in range. */
+	user_offset = vma->vm_pgoff << PAGE_SHIFT;
+	user_size = vma->vm_end - vma->vm_start;
+
+	if (user_offset >= space_size ||
+	    (user_offset + user_size) > space_size)
+		return -EINVAL;
+
+	/* Rebase the user's relative offset onto the lowest physical
+	 * address of the selected space.
+	 */
+	if (p->pbms_same_domain) {
+		unsigned long lowest = ~0UL;
+
+		if (mmap_state == pci_mmap_io) {
+			if (p->pbm_A.io_space.flags)
+				lowest = p->pbm_A.io_space.start;
+			if (p->pbm_B.io_space.flags &&
+			    lowest > p->pbm_B.io_space.start)
+				lowest = p->pbm_B.io_space.start;
+		} else {
+			if (p->pbm_A.mem_space.flags)
+				lowest = p->pbm_A.mem_space.start;
+			if (p->pbm_B.mem_space.flags &&
+			    lowest > p->pbm_B.mem_space.start)
+				lowest = p->pbm_B.mem_space.start;
+		}
+		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
+	} else {
+		if (mmap_state == pci_mmap_io) {
+			vma->vm_pgoff = (pbm->io_space.start +
+					 user_offset) >> PAGE_SHIFT;
+		} else {
+			vma->vm_pgoff = (pbm->mem_space.start +
+					 user_offset) >> PAGE_SHIFT;
+		}
+	}
+
+	return 0;
+}
+
+/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
+ * to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address for his device which he wishes
+ * to mmap.  They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+				  enum pci_mmap_state mmap_state)
+{
+	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long user32 = user_offset & pci_memspace_mask;
+	unsigned long largest_base, this_base, addr32;
+	int i;
+
+	/* Host bridges are handled specially: the whole bus space
+	 * is mappable, not just a single BAR.
+	 */
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);
+
+	/* Figure out which base address this is for.  We pick the
+	 * resource with the highest base address that still lies at
+	 * or below the user's 32-bit offset.
+	 */
+	largest_base = 0UL;
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		struct resource *rp = &dev->resource[i];
+
+		/* Active? */
+		if (!rp->flags)
+			continue;
+
+		/* Same type? */
+		if (i == PCI_ROM_RESOURCE) {
+			if (mmap_state != pci_mmap_mem)
+				continue;
+		} else {
+			if ((mmap_state == pci_mmap_io &&
+			     (rp->flags & IORESOURCE_IO) == 0) ||
+			    (mmap_state == pci_mmap_mem &&
+			     (rp->flags & IORESOURCE_MEM) == 0))
+				continue;
+		}
+
+		this_base = rp->start;
+
+		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
+
+		/* For I/O space only the low 24 bits are compared. */
+		if (mmap_state == pci_mmap_io)
+			addr32 &= 0xffffff;
+
+		if (addr32 <= user32 && this_base > largest_base)
+			largest_base = this_base;
+	}
+
+	if (largest_base == 0UL)
+		return -EINVAL;
+
+	/* Now construct the final physical address. */
+	if (mmap_state == pci_mmap_io)
+		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
+	else
+		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
+
+	return 0;
+}
+
+/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
+ * mapping.  VM_IO | VM_RESERVED marks the range as a device mapping.
+ */
+static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
+					    enum pci_mmap_state mmap_state)
+{
+	vma->vm_flags |= (VM_IO | VM_RESERVED);
+}
+
+/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.  Intentionally a no-op on sparc64.
+ */
+static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
+					     enum pci_mmap_state mmap_state)
+{
+	/* Our io_remap_page_range/io_remap_pfn_range takes care of this,
+	   do nothing. */
+}
+
+/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
+ * for this architecture.  The region in the process to map is described by vm_start
+ * and vm_end members of VMA, the base physical address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state,
+			int write_combine)
+{
+	int ret;
+
+	/* Translate the user's offset into a physical page offset
+	 * before remapping.
+	 */
+	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
+	if (ret < 0)
+		return ret;
+
+	__pci_mmap_set_flags(dev, vma, mmap_state);
+	__pci_mmap_set_pgprot(dev, vma, mmap_state);
+
+	ret = io_remap_pfn_range(vma, vma->vm_start,
+				 vma->vm_pgoff,
+				 vma->vm_end - vma->vm_start,
+				 vma->vm_page_prot);
+	if (ret)
+		return ret;
+
+	/* NOTE(review): VM_IO was already set by __pci_mmap_set_flags()
+	 * above, so this is redundant but harmless.
+	 */
+	vma->vm_flags |= VM_IO;
+	return 0;
+}
+
+/* Return the domain number for this pci bus */
+
+/* Map PBUS to its PCI domain number, or -ENXIO when the bus has no
+ * PBM cookie attached.
+ */
+int pci_domain_nr(struct pci_bus *pbus)
+{
+	struct pci_pbm_info *pbm = pbus->sysdata;
+	int ret;
+
+	if (pbm == NULL || pbm->parent == NULL) {
+		ret = -ENXIO;
+	} else {
+		struct pci_controller_info *p = pbm->parent;
+
+		ret = p->index;
+		/* When the two PBMs are separate domains, encode the
+		 * PBM (A=0, B=1) in the low bit of the domain number.
+		 */
+		if (p->pbms_same_domain == 0)
+			ret = ((ret << 1) +
+			       ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(pci_domain_nr);
+
+/* Prepare DEV for Memory-Write-Invalidate; returns 0 (success). */
+int pcibios_prep_mwi(struct pci_dev *dev)
+{
+	/* We set correct PCI_CACHE_LINE_SIZE register values for every
+	 * device probed on this platform.  So there is nothing to check
+	 * and this always succeeds.
+	 */
+	return 0;
+}
+
+#endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
new file mode 100644
index 0000000..58310aa
--- /dev/null
+++ b/arch/sparc64/kernel/pci_common.c
@@ -0,0 +1,1040 @@
+/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
+ * pci_common.c: PCI controller common support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/pbm.h>
+
+/* Fix self device of BUS and hook it into BUS->self.
+ * The pci_scan_bus does not do this for the host bridge.
+ *
+ * If no host bridge device is found the machine cannot be
+ * brought up sanely, so we halt back into OBP.
+ */
+void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
+			pbus->self = pdev;
+			return;
+		}
+	}
+
+	prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
+	prom_halt();
+}
+
+/* Find the OBP PROM device tree node for a PCI device.
+ * On success *NREGS is set to the number of "reg" entries copied
+ * into PREGS (zero for the PBM's own node).
+ * Return zero if not found.
+ */
+static int __init find_device_prom_node(struct pci_pbm_info *pbm,
+					struct pci_dev *pdev,
+					int bus_prom_node,
+					struct linux_prom_pci_registers *pregs,
+					int *nregs)
+{
+	int node;
+
+	/*
+	 * Return the PBM's PROM node in case we are its PCI device,
+	 * as the PBM's reg property is different to standard PCI reg
+	 * properties. We would delete this device entry otherwise,
+	 * which confuses XFree86's device probing...
+	 */
+	if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
+	    (pdev->vendor == PCI_VENDOR_ID_SUN) &&
+	    (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
+	     pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
+	     pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
+	     pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
+	     pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) {
+		*nregs = 0;
+		return bus_prom_node;
+	}
+
+	/* Walk the siblings under the bus node looking for a "reg"
+	 * property whose devfn (bits 15:8 of phys_hi) matches PDEV.
+	 */
+	node = prom_getchild(bus_prom_node);
+	while (node != 0) {
+		int err = prom_getproperty(node, "reg",
+					   (char *)pregs,
+					   sizeof(*pregs) * PROMREG_MAX);
+		if (err == 0 || err == -1)
+			goto do_next_sibling;
+		if (((pregs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
+			*nregs = err / sizeof(*pregs);
+			return node;
+		}
+
+	do_next_sibling:
+		node = prom_getsibling(node);
+	}
+	return 0;
+}
+
+/* Older versions of OBP on PCI systems encode 64-bit MEM
+ * space assignments incorrectly, this fixes them up.  We also
+ * take the opportunity here to hide other kinds of bogus
+ * assignments.
+ */
+static void __init fixup_obp_assignments(struct pci_dev *pdev,
+					 struct pcidev_cookie *pcp)
+{
+	int i;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AL &&
+	    (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
+	     pdev->device == PCI_DEVICE_ID_AL_M1533)) {
+		/* Zap all of the normal resources, they are
+		 * meaningless and generate bogus resource collision
+		 * messages.  This is OpenBoot's ill-fated attempt to
+		 * represent the implicit resources that these devices
+		 * have.
+		 */
+		pcp->num_prom_assignments = 0;
+		for (i = 0; i < 6; i++) {
+			pdev->resource[i].start =
+				pdev->resource[i].end =
+				pdev->resource[i].flags = 0;
+		}
+		pdev->resource[PCI_ROM_RESOURCE].start =
+			pdev->resource[PCI_ROM_RESOURCE].end =
+			pdev->resource[PCI_ROM_RESOURCE].flags = 0;
+		return;
+	}
+
+	for (i = 0; i < pcp->num_prom_assignments; i++) {
+		struct linux_prom_pci_registers *ap;
+		int space;
+
+		ap = &pcp->prom_assignments[i];
+		space = ap->phys_hi >> 24;
+		/* Buggy OBP emits 64-bit MEM assignments as space
+		 * code 2 (32-bit MEM) with bit 2 of the space byte
+		 * set; rewrite them to the proper 64-bit code 3.
+		 */
+		if ((space & 0x3) == 2 &&
+		    (space & 0x4) != 0) {
+			ap->phys_hi &= ~(0x7 << 24);
+			ap->phys_hi |= 0x3 << 24;
+		}
+	}
+}
+
+/* Fill in the PCI device cookie sysdata for the given
+ * PCI device.  This cookie is the means by which one
+ * can get to OBP and PCI controller specific information
+ * for a PCI device.
+ */
+static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
+				      struct pci_dev *pdev,
+				      int bus_prom_node)
+{
+	struct linux_prom_pci_registers pregs[PROMREG_MAX];
+	struct pcidev_cookie *pcp;
+	int device_prom_node, nregs, err;
+
+	device_prom_node = find_device_prom_node(pbm, pdev, bus_prom_node,
+						 pregs, &nregs);
+	if (device_prom_node == 0) {
+		/* If it is not in the OBP device tree then
+		 * there must be a damn good reason for it.
+		 *
+		 * So what we do is delete the device from the
+		 * PCI device tree completely.  This scenario
+		 * is seen, for example, on CP1500 for the
+		 * second EBUS/HappyMeal pair if the external
+		 * connector for it is not present.
+		 */
+		pci_remove_bus_device(pdev);
+		return;
+	}
+
+	/* Allocation failure is fatal this early in boot; fall
+	 * back into OBP.
+	 */
+	pcp = kmalloc(sizeof(*pcp), GFP_ATOMIC);
+	if (pcp == NULL) {
+		prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
+		prom_halt();
+	}
+	pcp->pbm = pbm;
+	pcp->prom_node = device_prom_node;
+	memcpy(pcp->prom_regs, pregs, sizeof(pcp->prom_regs));
+	pcp->num_prom_regs = nregs;
+	err = prom_getproperty(device_prom_node, "name",
+			       pcp->prom_name, sizeof(pcp->prom_name));
+	if (err > 0)
+		pcp->prom_name[err] = 0;
+	else
+		pcp->prom_name[0] = 0;
+
+	err = prom_getproperty(device_prom_node,
+			       "assigned-addresses",
+			       (char *)pcp->prom_assignments,
+			       sizeof(pcp->prom_assignments));
+	if (err == 0 || err == -1)
+		pcp->num_prom_assignments = 0;
+	else
+		pcp->num_prom_assignments =
+			(err / sizeof(pcp->prom_assignments[0]));
+
+	if (strcmp(pcp->prom_name, "ebus") == 0) {
+		struct linux_prom_ebus_ranges erng[PROM_PCIRNG_MAX];
+		int iter;
+
+		/* EBUS is special... it carries a "ranges" property
+		 * instead; synthesize assignment entries from the
+		 * parent-bus side of each range.
+		 */
+		err = prom_getproperty(device_prom_node, "ranges",
+				       (char *)&erng[0], sizeof(erng));
+		if (err == 0 || err == -1) {
+			prom_printf("EBUS: Fatal error, no range property\n");
+			prom_halt();
+		}
+		err = (err / sizeof(erng[0]));
+		for(iter = 0; iter < err; iter++) {
+			struct linux_prom_ebus_ranges *ep = &erng[iter];
+			struct linux_prom_pci_registers *ap;
+
+			ap = &pcp->prom_assignments[iter];
+
+			ap->phys_hi = ep->parent_phys_hi;
+			ap->phys_mid = ep->parent_phys_mid;
+			ap->phys_lo = ep->parent_phys_lo;
+			ap->size_hi = 0;
+			ap->size_lo = ep->size;
+		}
+		pcp->num_prom_assignments = err;
+	}
+
+	fixup_obp_assignments(pdev, pcp);
+
+	pdev->sysdata = pcp;
+}
+
+/* Recursively attach pcidev_cookie sysdata to every device at and
+ * below PBUS, using PROM_NODE as the OBP node for PBUS itself.
+ */
+void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
+				    struct pci_pbm_info *pbm,
+				    int prom_node)
+{
+	struct pci_dev *pdev, *pdev_next;
+	struct pci_bus *this_pbus, *pbus_next;
+
+	/* This must be _safe because the cookie fillin
+	   routine can delete devices from the tree.  */
+	list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
+		pdev_cookie_fillin(pbm, pdev, prom_node);
+
+	list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
+		struct pcidev_cookie *pcp = this_pbus->self->sysdata;
+
+		pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
+	}
+}
+
+/* Report a bogus PROM resource assignment for PDEV on the OBP
+ * console.  AP and RES may each be NULL (then that part is not
+ * printed).  When DO_PROM_HALT is non-zero, drop back into OBP.
+ */
+static void __init bad_assignment(struct pci_dev *pdev,
+				  struct linux_prom_pci_registers *ap,
+				  struct resource *res,
+				  int do_prom_halt)
+{
+	prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
+		    pdev->bus->number, pdev->devfn);
+	if (ap)
+		prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
+			    ap->phys_hi, ap->phys_mid, ap->phys_lo,
+			    ap->size_hi, ap->size_lo);
+	if (res)
+		prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
+			    res->start, res->end, res->flags);
+	prom_printf("Please email this information to davem@redhat.com\n");
+	if (do_prom_halt)
+		prom_halt();
+}
+
+/* Map a PROM assigned-address entry to the PBM root resource it
+ * lives in, keyed by the space code in bits 25:24 of phys_hi.
+ * Returns NULL for config space and for unknown space codes.
+ */
+static struct resource *
+__init get_root_resource(struct linux_prom_pci_registers *ap,
+			 struct pci_pbm_info *pbm)
+{
+	int space = (ap->phys_hi >> 24) & 3;
+
+	switch (space) {
+	case 0:
+		/* Configuration space, silently ignore it. */
+		return NULL;
+
+	case 1:
+		/* 16-bit IO space */
+		return &pbm->io_space;
+
+	case 2:
+		/* 32-bit MEM space */
+		return &pbm->mem_space;
+
+	case 3:
+		/* 64-bit MEM space, these are allocated out of
+		 * the 32-bit mem_space range for the PBM, ie.
+		 * we just zero out the upper 32-bits.
+		 */
+		return &pbm->mem_space;
+
+	default:
+		printk("PCI: What is resource space %x? "
+		       "Tell davem@redhat.com about it!\n", space);
+		return NULL;
+	}
+}
+
+/* Map a PROM assigned-address entry to the pdev resource it
+ * describes, keyed by the config register offset in the low
+ * byte of phys_hi.  Returns NULL (after logging) when the
+ * register offset is not a BAR or the ROM.
+ */
+static struct resource *
+__init get_device_resource(struct linux_prom_pci_registers *ap,
+			   struct pci_dev *pdev)
+{
+	struct resource *res;
+	int breg = (ap->phys_hi & 0xff);
+
+	switch (breg) {
+	case PCI_ROM_ADDRESS:
+		/* Unfortunately I have seen several cases where
+		 * buggy FCODE uses a space value of '1' (I/O space)
+		 * in the register property for the ROM address
+		 * so disable this sanity check for now.
+		 */
+#if 0
+	{
+		int space = (ap->phys_hi >> 24) & 3;
+
+		/* It had better be MEM space. */
+		if (space != 2)
+			bad_assignment(pdev, ap, NULL, 0);
+	}
+#endif
+		res = &pdev->resource[PCI_ROM_RESOURCE];
+		break;
+
+	case PCI_BASE_ADDRESS_0:
+	case PCI_BASE_ADDRESS_1:
+	case PCI_BASE_ADDRESS_2:
+	case PCI_BASE_ADDRESS_3:
+	case PCI_BASE_ADDRESS_4:
+	case PCI_BASE_ADDRESS_5:
+		/* BAR registers are 4 bytes apart in config space. */
+		res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
+		break;
+
+	default:
+		bad_assignment(pdev, ap, NULL, 0);
+		res = NULL;
+		break;
+	}
+
+	return res;
+}
+
+/* Return non-zero when PDEV is one of the Sun RIO companion
+ * functions whose OBP resource assignments are known to collide;
+ * for those we suppress the collision warning.
+ */
+static int __init pdev_resource_collisions_expected(struct pci_dev *pdev)
+{
+	if (pdev->vendor != PCI_VENDOR_ID_SUN)
+		return 0;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_SUN_RIO_EBUS:
+	case PCI_DEVICE_ID_SUN_RIO_1394:
+	case PCI_DEVICE_ID_SUN_RIO_USB:
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Sanity check the OBP "assigned-addresses" entries of PDEV against
+ * the corresponding pdev resources, rebase them into the PBM's
+ * physical address space, and claim them in the root resources.
+ */
+static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
+					   struct pci_dev *pdev)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	int i;
+
+	for (i = 0; i < pcp->num_prom_assignments; i++) {
+		struct linux_prom_pci_registers *ap;
+		struct resource *root, *res;
+
+		/* The format of this property is specified in
+		 * the PCI Bus Binding to IEEE1275-1994.
+		 */
+		ap = &pcp->prom_assignments[i];
+		root = get_root_resource(ap, pbm);
+		res = get_device_resource(ap, pdev);
+		if (root == NULL || res == NULL ||
+		    res->flags == 0)
+			continue;
+
+		/* Ok we know which resource this PROM assignment is
+		 * for, sanity check it.
+		 */
+		if ((res->start & 0xffffffffUL) != ap->phys_lo)
+			bad_assignment(pdev, ap, res, 1);
+
+		/* If it is a 64-bit MEM space assignment, verify that
+		 * the resource is too and that the upper 32-bits match.
+		 */
+		if (((ap->phys_hi >> 24) & 3) == 3) {
+			if (((res->flags & IORESOURCE_MEM) == 0) ||
+			    ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+			     != PCI_BASE_ADDRESS_MEM_TYPE_64))
+				bad_assignment(pdev, ap, res, 1);
+			if ((res->start >> 32) != ap->phys_mid)
+				bad_assignment(pdev, ap, res, 1);
+
+			/* PBM cannot generate cpu initiated PIOs
+			 * to the full 64-bit space.  Therefore the
+			 * upper 32-bits better be zero.  If it is
+			 * not, just skip it and we will assign it
+			 * properly ourselves.
+			 */
+			if ((res->start >> 32) != 0UL) {
+				printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
+				       "%016lx for region %ld on device %s\n",
+				       res->start, (res - &pdev->resource[0]), pci_name(pdev));
+				continue;
+			}
+		}
+
+		/* Adjust the resource into the physical address space
+		 * of this PBM.
+		 */
+		pbm->parent->resource_adjust(pdev, res, root);
+
+		if (request_resource(root, res) < 0) {
+			/* OK, there is some conflict.  But this is fine
+			 * since we'll reassign it in the fixup pass.
+			 *
+			 * We notify the user that OBP made an error if it
+			 * is a case we don't expect.
+			 */
+			if (!pdev_resource_collisions_expected(pdev)) {
+				printk(KERN_ERR "PCI: Address space collision on region %ld "
+				       "[%016lx:%016lx] of device %s\n",
+				       (res - &pdev->resource[0]),
+				       res->start, res->end,
+				       pci_name(pdev));
+			}
+		}
+	}
+}
+
+/* Walk the PCI tree rooted at PBUS, recording the OBP resource
+ * assignments of every device found.
+ */
+void __init pci_record_assignments(struct pci_pbm_info *pbm,
+				   struct pci_bus *pbus)
+{
+	struct pci_bus *child;
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list)
+		pdev_record_assignments(pbm, pdev);
+
+	list_for_each_entry(child, &pbus->children, node)
+		pci_record_assignments(pbm, child);
+}
+
+/* Return non-zero if PDEV has implicit I/O resources even
+ * though it may not have an I/O base address register
+ * active.
+ */
+static int __init has_implicit_io(struct pci_dev *pdev)
+{
+	unsigned int full_class = pdev->class;
+
+	/* Any display device counts (legacy VGA I/O, presumably). */
+	if ((full_class >> 16) == PCI_BASE_CLASS_DISPLAY)
+		return 1;
+
+	switch (full_class >> 8) {
+	case PCI_CLASS_NOT_DEFINED:
+	case PCI_CLASS_NOT_DEFINED_VGA:
+	case PCI_CLASS_STORAGE_IDE:
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Allocate address space for every resource of PDEV that OBP left
+ * unassigned, update the device's BARs, enable I/O/MEM decoding as
+ * appropriate, and enable bus mastering for bridges and IDE.
+ */
+static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
+					  struct pci_dev *pdev)
+{
+	u32 reg;
+	u16 cmd;
+	int i, io_seen, mem_seen;
+
+	io_seen = mem_seen = 0;
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		struct resource *root, *res;
+		unsigned long size, min, max, align;
+
+		res = &pdev->resource[i];
+
+		if (res->flags & IORESOURCE_IO)
+			io_seen++;
+		else if (res->flags & IORESOURCE_MEM)
+			mem_seen++;
+
+		/* If it is already assigned or the resource does
+		 * not exist, there is nothing to do.
+		 */
+		if (res->parent != NULL || res->flags == 0UL)
+			continue;
+
+		/* Determine the root we allocate from.  The first
+		 * 0x400 bytes of I/O space are skipped; MEM
+		 * allocations are capped at 2GB above the base.
+		 */
+		if (res->flags & IORESOURCE_IO) {
+			root = &pbm->io_space;
+			min = root->start + 0x400UL;
+			max = root->end;
+		} else {
+			root = &pbm->mem_space;
+			min = root->start;
+			max = min + 0x80000000UL;
+		}
+
+		/* Align naturally: alignment equals the region size. */
+		size = res->end - res->start;
+		align = size + 1;
+		if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
+			/* uh oh */
+			prom_printf("PCI: Failed to allocate resource %d for %s\n",
+				    i, pci_name(pdev));
+			prom_halt();
+		}
+
+		/* Update PCI config space. */
+		pbm->parent->base_address_update(pdev, i);
+	}
+
+	/* Special case, disable the ROM.  Several devices
+	 * act funny (ie. do not respond to memory space writes)
+	 * when it is left enabled.  A good example are Qlogic,ISP
+	 * adapters.
+	 */
+	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
+	reg &= ~PCI_ROM_ADDRESS_ENABLE;
+	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
+
+	/* If we saw I/O or MEM resources, enable appropriate
+	 * bits in PCI command register.
+	 */
+	if (io_seen || mem_seen) {
+		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+		if (io_seen || has_implicit_io(pdev))
+			cmd |= PCI_COMMAND_IO;
+		if (mem_seen)
+			cmd |= PCI_COMMAND_MEMORY;
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+	}
+
+	/* If this is a PCI bridge or an IDE controller,
+	 * enable bus mastering.  In the former case also
+	 * set the cache line size correctly.
+	 */
+	if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
+	    (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
+	     ((pdev->class & 0x80) != 0))) {
+		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+		cmd |= PCI_COMMAND_MASTER;
+		pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+		if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
+			pci_write_config_byte(pdev,
+					      PCI_CACHE_LINE_SIZE,
+					      (64 / sizeof(u32)));
+	}
+}
+
+/* Walk the PCI tree rooted at PBUS, assigning address space to
+ * every resource OBP left unassigned.
+ */
+void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
+				  struct pci_bus *pbus)
+{
+	struct pci_bus *child;
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list)
+		pdev_assign_unassigned(pbm, pdev);
+
+	list_for_each_entry(child, &pbus->children, node)
+		pci_assign_unassigned(pbm, child);
+}
+
+/* Look up PDEV's interrupt in the controller's (or an intervening
+ * bridge's) OBP "interrupt-map" property.  On a match, *INTERRUPT
+ * is rewritten to the mapped controller interrupt and 1 is
+ * returned; otherwise 0 (possibly after applying slot swizzling
+ * to *INTERRUPT).
+ */
+static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
+{
+	struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap;
+	struct linux_prom_pci_intmask bridge_local_intmask, *intmask;
+	struct pcidev_cookie *dev_pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = dev_pcp->pbm;
+	struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs;
+	unsigned int hi, mid, lo, irq;
+	int i, num_intmap, map_slot;
+
+	intmap = &pbm->pbm_intmap[0];
+	intmask = &pbm->pbm_intmask;
+	num_intmap = pbm->num_pbm_intmap;
+	map_slot = 0;
+
+	/* If we are underneath a PCI bridge, use PROM register
+	 * property of the parent bridge which is closest to
+	 * the PBM.
+	 *
+	 * However if that parent bridge has interrupt map/mask
+	 * properties of its own we use the PROM register property
+	 * of the next child device on the path to PDEV.
+	 *
+	 * In detail the two cases are (note that the 'X' below is the
+	 * 'next child on the path to PDEV' mentioned above):
+	 *
+	 * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV
+	 *
+	 *    Here we use regs of 'PCI bus' device.
+	 *
+	 * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV
+	 *
+	 *    Here we use regs of 'X'.  Note that X can be PDEV.
+	 */
+	if (pdev->bus->number != pbm->pci_first_busno) {
+		struct pcidev_cookie *bus_pcp, *regs_pcp;
+		struct pci_dev *bus_dev, *regs_dev;
+		int plen;
+
+		bus_dev = pdev->bus->self;
+		regs_dev = pdev;
+
+		/* Ascend towards the PBM, remembering the last
+		 * device seen on the way up.
+		 */
+		while (bus_dev->bus &&
+		       bus_dev->bus->number != pbm->pci_first_busno) {
+			regs_dev = bus_dev;
+			bus_dev = bus_dev->bus->self;
+		}
+
+		regs_pcp = regs_dev->sysdata;
+		pregs = regs_pcp->prom_regs;
+
+		bus_pcp = bus_dev->sysdata;
+
+		/* But if the PCI bridge has its own interrupt map
+		 * and mask properties, use that and the regs of the
+		 * PCI entity at the next level down on the path to the
+		 * device.
+		 */
+		plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map",
+					(char *) &bridge_local_intmap[0],
+					sizeof(bridge_local_intmap));
+		if (plen != -1) {
+			intmap = &bridge_local_intmap[0];
+			num_intmap = plen / sizeof(struct linux_prom_pci_intmap);
+			plen = prom_getproperty(bus_pcp->prom_node,
+						"interrupt-map-mask",
+						(char *) &bridge_local_intmask,
+						sizeof(bridge_local_intmask));
+			if (plen == -1) {
+				printk("pci_intmap_match: Warning! Bridge has intmap "
+				       "but no intmask.\n");
+				printk("pci_intmap_match: Trying to recover.\n");
+				return 0;
+			}
+
+			if (pdev->bus->self != bus_dev)
+				map_slot = 1;
+		} else {
+			pregs = bus_pcp->prom_regs;
+			map_slot = 1;
+		}
+	}
+
+	/* Swizzle the INTx line (1-4) by the device's slot number. */
+	if (map_slot) {
+		*interrupt = ((*interrupt
+			       - 1
+			       + PCI_SLOT(pdev->devfn)) & 0x3) + 1;
+	}
+
+	hi   = pregs->phys_hi & intmask->phys_hi;
+	mid  = pregs->phys_mid & intmask->phys_mid;
+	lo   = pregs->phys_lo & intmask->phys_lo;
+	irq  = *interrupt & intmask->interrupt;
+
+	for (i = 0; i < num_intmap; i++) {
+		if (intmap[i].phys_hi  == hi	&&
+		    intmap[i].phys_mid == mid	&&
+		    intmap[i].phys_lo  == lo	&&
+		    intmap[i].interrupt == irq) {
+			*interrupt = intmap[i].cinterrupt;
+			printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n",
+			       pdev->bus->number, PCI_SLOT(pdev->devfn),
+			       map_slot, *interrupt);
+			return 1;
+		}
+	}
+
+	/* We will run this code even if pbm->num_pbm_intmap is zero, just so
+	 * we can apply the slot mapping to the PROM interrupt property value.
+	 * So do not spit out these warnings in that case.
+	 */
+	if (num_intmap != 0) {
+		/* Print it both to OBP console and kernel one so that if bootup
+		 * hangs here the user has the information to report.
+		 */
+		prom_printf("pci_intmap_match: bus %02x, devfn %02x: ",
+			    pdev->bus->number, pdev->devfn);
+		prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
+			    pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
+		prom_printf("Please email this information to davem@redhat.com\n");
+
+		printk("pci_intmap_match: bus %02x, devfn %02x: ",
+		       pdev->bus->number, pdev->devfn);
+		printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
+		       pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
+		printk("Please email this information to davem@redhat.com\n");
+	}
+
+	return 0;
+}
+
+/* Determine and program the IRQ for PDEV, trying in order: the
+ * OBP "interrupts" property (fully specified or onboard), the
+ * interrupt-map lookup, and finally manual bus/slot/line
+ * composition.  The result is written back to the device's
+ * PCI_INTERRUPT_LINE register.
+ */
+static void __init pdev_fixup_irq(struct pci_dev *pdev)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct pci_controller_info *p = pbm->parent;
+	unsigned int portid = pbm->portid;
+	unsigned int prom_irq;
+	int prom_node = pcp->prom_node;
+	int err;
+
+	/* If this is an empty EBUS device, sometimes OBP fails to
+	 * give it a valid fully specified interrupts property.
+	 * The EBUS hooked up to SunHME on PCI I/O boards of
+	 * Ex000 systems is one such case.
+	 *
+	 * The interrupt is not important so just ignore it.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+	    pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
+	    !prom_getchild(prom_node)) {
+		pdev->irq = 0;
+		return;
+	}
+
+	err = prom_getproperty(prom_node, "interrupts",
+			       (char *)&prom_irq, sizeof(prom_irq));
+	if (err == 0 || err == -1) {
+		pdev->irq = 0;
+		return;
+	}
+
+	/* Fully specified already?  (IGN field matches our portid) */
+	if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
+		pdev->irq = p->irq_build(pbm, pdev, prom_irq);
+		goto have_irq;
+	}
+
+	/* An onboard device? (bit 5 set) */
+	if ((prom_irq & PCI_IRQ_INO) & 0x20) {
+		pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
+		goto have_irq;
+	}
+
+	/* Can we find a matching entry in the interrupt-map? */
+	if (pci_intmap_match(pdev, &prom_irq)) {
+		pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
+		goto have_irq;
+	}
+
+	/* Ok, we have to do it the hard way: compose the INO from
+	 * PBM (bus bit), slot and interrupt line.
+	 */
+	{
+		unsigned int bus, slot, line;
+
+		bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;
+
+		/* If we have a legal interrupt property, use it as
+		 * the IRQ line.
+		 */
+		if (prom_irq > 0 && prom_irq < 5) {
+			line = ((prom_irq - 1) & 3);
+		} else {
+			u8 pci_irq_line;
+
+			/* Else just directly consult PCI config space. */
+			pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
+			line = ((pci_irq_line - 1) & 3);
+		}
+
+		/* Now figure out the slot.
+		 *
+		 * Basically, device number zero on the top-level bus is
+		 * always the PCI host controller.  Slot 0 is then device 1.
+		 * PBM A supports two external slots (0 and 1), and PBM B
+		 * supports 4 external slots (0, 1, 2, and 3).  On-board PCI
+		 * devices are wired to device numbers outside of these
+		 * ranges. -DaveM
+		 */
+		if (pdev->bus->number == pbm->pci_first_busno) {
+			slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
+		} else {
+			struct pci_dev *bus_dev;
+
+			/* Underneath a bridge, use slot number of parent
+			 * bridge which is closest to the PBM.
+			 */
+			bus_dev = pdev->bus->self;
+			while (bus_dev->bus &&
+			       bus_dev->bus->number != pbm->pci_first_busno)
+				bus_dev = bus_dev->bus->self;
+
+			slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
+		}
+		slot = slot << 2;
+
+		pdev->irq = p->irq_build(pbm, pdev,
+					 ((portid << 6) & PCI_IRQ_IGN) |
+					 (bus | slot | line));
+	}
+
+have_irq:
+	/* Record the INO in the device's interrupt line register. */
+	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
+			      pdev->irq & PCI_IRQ_INO);
+}
+
+/* Walk the PCI tree rooted at PBUS, fixing up the IRQ of every
+ * device found.
+ */
+void __init pci_fixup_irq(struct pci_pbm_info *pbm,
+			  struct pci_bus *pbus)
+{
+	struct pci_bus *child;
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list)
+		pdev_fixup_irq(pdev);
+
+	list_for_each_entry(child, &pbus->children, node)
+		pci_fixup_irq(pbm, child);
+}
+
+/* Enable bus mastering on PDEV (when the device supports it) and
+ * program a sensible cache line size and latency timer.  IS_66MHZ
+ * selects the burst-period scaling used for the latency value.
+ */
+static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
+{
+	u16 cmd;
+	u8 hdr_type, min_gnt, ltimer;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	cmd |= PCI_COMMAND_MASTER;
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+	/* Read it back, if the mastering bit did not
+	 * get set, the device does not support bus
+	 * mastering so we have nothing to do here.
+	 */
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	if ((cmd & PCI_COMMAND_MASTER) == 0)
+		return;
+
+	/* Set correct cache line size, 64-byte on all
+	 * Sparc64 PCI systems.  Note that the value is
+	 * measured in 32-bit words.
+	 */
+	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+			      64 / sizeof(u32));
+
+	/* Latency timer handling below applies only to normal
+	 * (type 0) headers; strip the multi-function bit first.
+	 */
+	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
+	hdr_type &= ~0x80;
+	if (hdr_type != PCI_HEADER_TYPE_NORMAL)
+		return;
+
+	/* If the latency timer is already programmed with a non-zero
+	 * value, assume whoever set it (OBP or whoever) knows what
+	 * they are doing.
+	 */
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
+	if (ltimer != 0)
+		return;
+
+	/* XXX Since I'm tipping off the min grant value to
+	 * XXX choose a suitable latency timer value, I also
+	 * XXX considered making use of the max latency value
+	 * XXX as well.  Unfortunately I've seen too many bogusly
+	 * XXX low settings for it to the point where it lacks
+	 * XXX any usefulness.  In one case, an ethernet card
+	 * XXX claimed a min grant of 10 and a max latency of 5.
+	 * XXX Now, if I had two such cards on the same bus I
+	 * XXX could not set the desired burst period (calculated
+	 * XXX from min grant) without violating the max latency
+	 * XXX bound.  Duh...
+	 * XXX
+	 * XXX I blame dumb PC bios implementors for stuff like
+	 * XXX this, most of them don't even try to do something
+	 * XXX sensible with latency timer values and just set some
+	 * XXX default value (usually 32) into every device.
+	 */
+
+	pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
+
+	if (min_gnt == 0) {
+		/* If no min_gnt setting then use a default
+		 * value.
+		 */
+		if (is_66mhz)
+			ltimer = 16;
+		else
+			ltimer = 32;
+	} else {
+		int shift_factor;
+
+		if (is_66mhz)
+			shift_factor = 2;
+		else
+			shift_factor = 3;
+
+		/* Use a default value when the min_gnt value
+		 * is erroneously high.
+		 */
+		if (((unsigned int) min_gnt << shift_factor) > 512 ||
+		    ((min_gnt << shift_factor) & 0xff) == 0) {
+			ltimer = 8 << shift_factor;
+		} else {
+			ltimer = min_gnt << shift_factor;
+		}
+	}
+
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
+}
+
+/* Determine whether every device directly on PBUS advertises 66MHz
+ * capability and record the verdict in pbm->all_devs_66mhz.  Note
+ * that only the top-level bus devices are examined, not children.
+ */
+void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
+				     struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+	int all_are_66mhz;
+	u16 status;
+
+	if (pbm->is_66mhz_capable == 0) {
+		all_are_66mhz = 0;
+		goto out;
+	}
+
+	/* One non-66MHz device drags the whole bus down to 33MHz. */
+	all_are_66mhz = 1;
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		pci_read_config_word(pdev, PCI_STATUS, &status);
+		if (!(status & PCI_STATUS_66MHZ)) {
+			all_are_66mhz = 0;
+			break;
+		}
+	}
+out:
+	pbm->all_devs_66mhz = all_are_66mhz;
+
+	printk("PCI%d(PBM%c): Bus running at %dMHz\n",
+	       pbm->parent->index,
+	       (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
+	       (all_are_66mhz ? 66 : 33));
+}
+
+/* Walk the PCI tree rooted at PBUS, configuring bus mastering on
+ * every device found.  66MHz timings are used only when the PBM is
+ * capable and every device on the bus advertised 66MHz support.
+ */
+void pci_setup_busmastering(struct pci_pbm_info *pbm,
+			    struct pci_bus *pbus)
+{
+	int bus_is_66mhz = (pbm->is_66mhz_capable && pbm->all_devs_66mhz);
+	struct pci_bus *child;
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list)
+		pdev_setup_busmastering(pdev, bus_is_66mhz);
+
+	list_for_each_entry(child, &pbus->children, node)
+		pci_setup_busmastering(pbm, child);
+}
+
+/* Allocate and claim one legacy PC region located OFFSET bytes into
+ * MEM_RES and SIZE bytes long.  Returns 0 on success, -ENOMEM when
+ * the resource struct cannot be allocated.  A claim conflict from
+ * request_resource() is ignored, as before.
+ */
+static int pci_register_one_legacy_region(struct resource *mem_res,
+					  const char *name,
+					  unsigned long offset,
+					  unsigned long size)
+{
+	struct resource *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+
+	memset(p, 0, sizeof(*p));
+	p->name = name;
+	p->start = mem_res->start + offset;
+	p->end = p->start + (size - 1UL);
+	p->flags = IORESOURCE_BUSY;
+	request_resource(mem_res, p);
+	return 0;
+}
+
+/* Reserve the legacy PC video/ROM areas inside MEM_RES so they are
+ * never handed out to devices.
+ */
+void pci_register_legacy_regions(struct resource *io_res,
+				 struct resource *mem_res)
+{
+	/* Stop at the first allocation failure, matching the
+	 * original early-return behavior.
+	 */
+	if (pci_register_one_legacy_region(mem_res, "Video RAM area",
+					   0xa0000UL, 0x20000UL))
+		return;
+	if (pci_register_one_legacy_region(mem_res, "System ROM",
+					   0xf0000UL, 0x10000UL))
+		return;
+	pci_register_one_legacy_region(mem_res, "Video ROM",
+				       0xc0000UL, 0x8000UL);
+}
+
+/* Generic helper routines for PCI error reporting. */
+
+/* Recursively scan all devices under PBUS for signalled or received
+ * Target Aborts, logging and clearing any found (PCI status error
+ * bits are write-one-to-clear).
+ */
+void pci_scan_for_target_abort(struct pci_controller_info *p,
+			       struct pci_pbm_info *pbm,
+			       struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+	struct pci_bus *bus;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		u16 status, error_bits;
+
+		pci_read_config_word(pdev, PCI_STATUS, &status);
+		error_bits =
+			(status & (PCI_STATUS_SIG_TARGET_ABORT |
+				   PCI_STATUS_REC_TARGET_ABORT));
+		if (error_bits) {
+			pci_write_config_word(pdev, PCI_STATUS, error_bits);
+			printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n",
+			       p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+			       pci_name(pdev), status);
+		}
+	}
+
+	list_for_each_entry(bus, &pbus->children, node)
+		pci_scan_for_target_abort(p, pbm, bus);
+}
+
+/* Recursively scan all devices under PBUS for received Master
+ * Aborts, logging and clearing any found (PCI status error bits
+ * are write-one-to-clear).
+ */
+void pci_scan_for_master_abort(struct pci_controller_info *p,
+			       struct pci_pbm_info *pbm,
+			       struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+	struct pci_bus *bus;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		u16 status, error_bits;
+
+		pci_read_config_word(pdev, PCI_STATUS, &status);
+		error_bits =
+			(status & (PCI_STATUS_REC_MASTER_ABORT));
+		if (error_bits) {
+			pci_write_config_word(pdev, PCI_STATUS, error_bits);
+			printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n",
+			       p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+			       pci_name(pdev), status);
+		}
+	}
+
+	list_for_each_entry(bus, &pbus->children, node)
+		pci_scan_for_master_abort(p, pbm, bus);
+}
+
+/* Recursively scan all devices under PBUS for detected or signalled
+ * parity errors, logging and clearing any found (PCI status error
+ * bits are write-one-to-clear).
+ */
+void pci_scan_for_parity_error(struct pci_controller_info *p,
+			       struct pci_pbm_info *pbm,
+			       struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+	struct pci_bus *bus;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		u16 status, error_bits;
+
+		pci_read_config_word(pdev, PCI_STATUS, &status);
+		error_bits =
+			(status & (PCI_STATUS_PARITY |
+				   PCI_STATUS_DETECTED_PARITY));
+		if (error_bits) {
+			pci_write_config_word(pdev, PCI_STATUS, error_bits);
+			printk("PCI%d(PBM%c): Device [%s] saw Parity Error [%016x]\n",
+			       p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+			       pci_name(pdev), status);
+		}
+	}
+
+	list_for_each_entry(bus, &pbus->children, node)
+		pci_scan_for_parity_error(p, pbm, bus);
+}
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
new file mode 100644
index 0000000..6c32059
--- /dev/null
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -0,0 +1,49 @@
+/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
+ * pci_impl.h: Helper definitions for PCI controller support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef PCI_IMPL_H
+#define PCI_IMPL_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+
+/* Head of the singly-linked list of probed PCI controllers. */
+extern struct pci_controller_info *pci_controller_root;
+
+/* Number of PCI controllers on this list. */
+extern int pci_num_controllers;
+
+/* PCI bus scanning and fixup support.  These walk a freshly probed
+ * bus hierarchy to attach cookies, record firmware-assigned resources,
+ * assign whatever is left, and wire up interrupts.
+ */
+extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
+extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
+				    struct pci_pbm_info *pbm,
+				    int prom_node);
+extern void pci_record_assignments(struct pci_pbm_info *pbm,
+				   struct pci_bus *pbus);
+extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
+				  struct pci_bus *pbus);
+extern void pci_fixup_irq(struct pci_pbm_info *pbm,
+			  struct pci_bus *pbus);
+extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
+					    struct pci_bus *pbus);
+extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
+				   struct pci_bus *pbus);
+extern void pci_register_legacy_regions(struct resource *io_res,
+					struct resource *mem_res);
+
+/* Error reporting support.  Each scans a bus tree for devices that
+ * latched the corresponding PCI status error bits, clearing and
+ * logging them.
+ */
+extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+
+/* Configuration space access.  ADDR is a pointer into the memory-mapped
+ * config space; the result of a read is stored through RET.
+ */
+extern void pci_config_read8(u8 *addr, u8 *ret);
+extern void pci_config_read16(u16 *addr, u16 *ret);
+extern void pci_config_read32(u32 *addr, u32 *ret);
+extern void pci_config_write8(u8 *addr, u8 val);
+extern void pci_config_write16(u16 *addr, u16 val);
+extern void pci_config_write32(u32 *addr, u32 val);
+
+#endif /* !(PCI_IMPL_H) */
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
new file mode 100644
index 0000000..2929834
--- /dev/null
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -0,0 +1,855 @@
+/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
+ * pci_iommu.c: UltraSparc PCI controller IOMMU/STC support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/pbm.h>
+
+#include "iommu_common.h"
+
+/* Physical address of the streaming cache context-match register for
+ * context CTX (each entry is 8 bytes wide).
+ */
+#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
+	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
+
+/* Accessing IOMMU and Streaming Buffer registers.
+ * REG parameter is a physical address.  All registers
+ * are 64-bits in size.
+ */
+#define pci_iommu_read(__reg) \
+({	u64 __ret; \
+	__asm__ __volatile__("ldxa [%1] %2, %0" \
+			     : "=r" (__ret) \
+			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory"); \
+	__ret; \
+})
+/* The "memory" clobber keeps the compiler from reordering memory
+ * accesses around the hardware register write, matching psycho_write.
+ */
+#define pci_iommu_write(__reg, __val) \
+	__asm__ __volatile__("stxa %0, [%1] %2" \
+			     : /* no outputs */ \
+			     : "r" (__val), "r" (__reg), \
+			       "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory")
+
+/* Invalidate the entire IOMMU TLB by writing zero to each of the 16
+ * tag registers, then update every cluster's flush point so the
+ * streaming allocator knows no stale entries remain.
+ *
+ * Must be invoked under the IOMMU lock.
+ */
+static void __iommu_flushall(struct pci_iommu *iommu)
+{
+	unsigned long tag;
+	int entry;
+
+	/* The tag register block sits at a fixed offset (0xa580) from
+	 * the IOMMU flush register (0x0210); each tag is 8 bytes.
+	 */
+	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
+	for (entry = 0; entry < 16; entry++) {
+		pci_iommu_write(tag, 0);
+		tag += 8;
+	}
+
+	/* Ensure completion of previous PIO writes. */
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	/* Now update everyone's flush point. */
+	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
+		iommu->alloc_info[entry].flush =
+			iommu->alloc_info[entry].next;
+	}
+}
+
+/* IOPTE bits for a consistent (coherent, non-streaming) mapping:
+ * valid + cacheable, with context number CTX shifted into the
+ * IOPTE_CONTEXT field at bit 47.
+ */
+#define IOPTE_CONSISTENT(CTX) \
+	(IOPTE_VALID | IOPTE_CACHE | \
+	 (((CTX) << 47) & IOPTE_CONTEXT))
+
+/* Streaming mappings additionally route data through the streaming
+ * buffer (IOPTE_STBUF).
+ */
+#define IOPTE_STREAMING(CTX) \
+	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
+
+/* Existing mappings are never marked invalid, instead they
+ * are pointed to a dummy page.
+ */
+#define IOPTE_IS_DUMMY(iommu, iopte)	\
+	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
+
+/* Point IOPTE at the IOMMU's dummy page, preserving all non-address
+ * bits.  This is how entries are "freed": they stay valid but refer
+ * to harmless memory (see IOPTE_IS_DUMMY).
+ */
+static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+{
+	unsigned long val = iopte_val(*iopte);
+
+	val &= ~IOPTE_PAGE;
+	val |= iommu->dummy_page_pa;
+
+	iopte_val(*iopte) = val;
+}
+
+/* Initialize a freshly allocated IOMMU page table of TSBSIZE bytes by
+ * pointing every IOPTE at the dummy page (entries are never marked
+ * invalid, merely redirected there).
+ */
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+{
+	int nentries = tsbsize / sizeof(iopte_t);
+	int i;
+
+	for (i = 0; i < nentries; i++)
+		iopte_make_dummy(iommu, &iommu->page_table[i]);
+}
+
+/* Allocate a streaming-mode IOPTE cluster large enough for NPAGES
+ * pages.  Clusters are power-of-two sized; cluster order CNUM entries
+ * are carved out of the CNUM'th region of the page table.  Allocation
+ * is rotor-based: we resume from alloc_info[cnum].next and flush the
+ * IOMMU whenever the rotor catches up with the recorded flush point.
+ *
+ * Called under the IOMMU lock.  Returns NULL when the cluster region
+ * is exhausted.
+ */
+static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte, *limit, *first;
+	unsigned long cnum, ent, flush_point;
+
+	/* Cluster order: smallest power of two >= npages. */
+	cnum = 0;
+	while ((1UL << cnum) < npages)
+		cnum++;
+	iopte  = (iommu->page_table +
+		  (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+
+	/* Cluster zero shares the table with consistent mappings, which
+	 * grow down from the top; do not scan past the lowest one.
+	 */
+	if (cnum == 0)
+		limit = (iommu->page_table +
+			 iommu->lowest_consistent_map);
+	else
+		limit = (iopte +
+			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+
+	/* Start at the per-cluster allocation rotor. */
+	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
+	flush_point = iommu->alloc_info[cnum].flush;
+	
+	first = iopte;
+	for (;;) {
+		if (IOPTE_IS_DUMMY(iommu, iopte)) {
+			/* Free entry found; advance the rotor (wrapping
+			 * at the limit) and flush if we reached the
+			 * flush point.
+			 */
+			if ((iopte + (1 << cnum)) >= limit)
+				ent = 0;
+			else
+				ent = ent + 1;
+			iommu->alloc_info[cnum].next = ent;
+			if (ent == flush_point)
+				__iommu_flushall(iommu);
+			break;
+		}
+		iopte += (1 << cnum);
+		ent++;
+		if (iopte >= limit) {
+			/* Wrap back to the start of this cluster region. */
+			iopte = (iommu->page_table +
+				 (cnum <<
+				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+			ent = 0;
+		}
+		if (ent == flush_point)
+			__iommu_flushall(iommu);
+		/* A full lap without a free entry means we are out. */
+		if (iopte == first)
+			goto bad;
+	}
+
+	/* I've got your streaming cluster right here buddy boy... */
+	return iopte;
+
+bad:
+	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
+	       npages);
+	return NULL;
+}
+
+/* Release the streaming cluster that starts at bus-address offset BASE
+ * and spans NPAGES pages.  Nothing is written to the table here (the
+ * IOPTEs were already redirected to the dummy page by the caller); we
+ * only make sure the entry will be flushed before it can be reused.
+ *
+ * Called under the IOMMU lock.
+ */
+static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
+				   unsigned long npages, unsigned long ctx)
+{
+	unsigned long cnum, ent;
+
+	/* Recompute the cluster order used at allocation time. */
+	cnum = 0;
+	while ((1UL << cnum) < npages)
+		cnum++;
+
+	/* Recover the rotor index of this entry within cluster CNUM
+	 * from the bus-address offset.
+	 */
+	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
+		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+
+	/* If the global flush might not have caught this entry,
+	 * adjust the flush point such that we will flush before
+	 * ever trying to reuse it.
+	 */
+#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
+	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
+		iommu->alloc_info[cnum].flush = ent;
+#undef between
+}
+
+/* We allocate consistent mappings from the end of cluster zero. */
+static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte;
+
+	/* Scan backwards from the top of cluster zero looking for a run
+	 * of NPAGES consecutive free (dummy) entries.  Returns the
+	 * lowest IOPTE of the run, or NULL if none exists.
+	 */
+	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
+	while (iopte > iommu->page_table) {
+		iopte--;
+		if (IOPTE_IS_DUMMY(iommu, iopte)) {
+			unsigned long tmp = npages;
+
+			/* Verify the remaining NPAGES-1 entries below
+			 * are also free; tmp reaches 0 only when the
+			 * whole run is free.
+			 */
+			while (--tmp) {
+				iopte--;
+				if (!IOPTE_IS_DUMMY(iommu, iopte))
+					break;
+			}
+			if (tmp == 0) {
+				u32 entry = (iopte - iommu->page_table);
+
+				/* Track the lowest consistent mapping;
+				 * the streaming allocator uses it as
+				 * its cluster-zero scan limit.
+				 */
+				if (entry < iommu->lowest_consistent_map)
+					iommu->lowest_consistent_map = entry;
+				return iopte;
+			}
+		}
+	}
+	return NULL;
+}
+
+/* Allocate and map kernel buffer of size SIZE using consistent mode
+ * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
+ * successful and set *DMA_ADDRP to the PCI side dma address.
+ */
+void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	iopte_t *iopte;
+	unsigned long flags, order, first_page, ctx;
+	void *ret;
+	int npages;
+
+	size = IO_PAGE_ALIGN(size);
+	order = get_order(size);
+	/* Refuse absurdly large allocations (order 10+ pages). */
+	if (order >= 10)
+		return NULL;
+
+	/* GFP_ATOMIC: this path may be called from contexts that
+	 * cannot sleep.  Zero the pages before handing them out.
+	 */
+	first_page = __get_free_pages(GFP_ATOMIC, order);
+	if (first_page == 0UL)
+		return NULL;
+	memset((char *)first_page, 0, PAGE_SIZE << order);
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
+	if (iopte == NULL) {
+		spin_unlock_irqrestore(&iommu->lock, flags);
+		free_pages(first_page, order);
+		return NULL;
+	}
+
+	/* The DMA address is determined by the entry's position in
+	 * the page table.
+	 */
+	*dma_addrp = (iommu->page_table_map_base +
+		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
+	ret = (void *) first_page;
+	npages = size >> IO_PAGE_SHIFT;
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu->iommu_cur_ctx++;
+	first_page = __pa(first_page);
+	/* Fill in one writable consistent IOPTE per page. */
+	while (npages--) {
+		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+				     IOPTE_WRITE |
+				     (first_page & IOPTE_PAGE));
+		iopte++;
+		first_page += IO_PAGE_SIZE;
+	}
+
+	/* Flush the IOMMU TLB for each page of the new mapping so no
+	 * stale translations remain.
+	 */
+	{
+		int i;
+		u32 daddr = *dma_addrp;
+
+		npages = size >> IO_PAGE_SHIFT;
+		for (i = 0; i < npages; i++) {
+			pci_iommu_write(iommu->iommu_flush, daddr);
+			daddr += IO_PAGE_SIZE;
+		}
+	}
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
+}
+
+/* Free and unmap a consistent DMA translation.  SIZE and DVMA must
+ * match what pci_alloc_consistent() returned; CPU is the kernel-side
+ * address to free.
+ */
+void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	iopte_t *iopte;
+	unsigned long flags, order, npages, i, ctx;
+
+	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	iopte = iommu->page_table +
+		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* If this was the lowest consistent mapping, walk upward past
+	 * the freed range to find the new lowest in-use entry, so the
+	 * streaming allocator regains the space.
+	 */
+	if ((iopte - iommu->page_table) ==
+	    iommu->lowest_consistent_map) {
+		iopte_t *walk = iopte + npages;
+		iopte_t *limit;
+
+		limit = (iommu->page_table +
+			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+		while (walk < limit) {
+			if (!IOPTE_IS_DUMMY(iommu, walk))
+				break;
+			walk++;
+		}
+		iommu->lowest_consistent_map =
+			(walk - iommu->page_table);
+	}
+
+	/* Data for consistent mappings cannot enter the streaming
+	 * buffers, so we only need to update the TSB.  We flush
+	 * the IOMMU here as well to prevent conflicts with the
+	 * streaming mapping deferred tlb flush scheme.
+	 */
+
+	/* Recover the context from the first IOPTE before dummying it. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+
+	for (i = 0; i < npages; i++, iopte++)
+		iopte_make_dummy(iommu, iopte);
+
+	/* Context flush invalidates every page in the context at once;
+	 * otherwise flush page by page.
+	 */
+	if (iommu->iommu_ctxflush) {
+		pci_iommu_write(iommu->iommu_ctxflush, ctx);
+	} else {
+		for (i = 0; i < npages; i++) {
+			u32 daddr = dvma + (i << IO_PAGE_SHIFT);
+
+			pci_iommu_write(iommu->iommu_flush, daddr);
+		}
+	}
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	order = get_order(size);
+	if (order < 10)
+		free_pages((unsigned long)cpu, order);
+}
+
+/* Map a single buffer at PTR of SZ bytes for PCI DMA
+ * in streaming mode.  Returns the bus address to hand to the device,
+ * or PCI_DMA_ERROR_CODE when no cluster could be allocated.
+ */
+dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	iopte_t *base;
+	unsigned long flags, npages, oaddr;
+	unsigned long i, base_paddr, ctx;
+	u32 bus_addr, ret;
+	unsigned long iopte_protection;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	/* Page count covers the buffer plus its sub-page offset. */
+	oaddr = (unsigned long)ptr;
+	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	base = alloc_streaming_cluster(iommu, npages);
+	if (base == NULL)
+		goto bad;
+	bus_addr = (iommu->page_table_map_base +
+		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
+	/* Preserve the original sub-page offset in the returned address. */
+	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
+	base_paddr = __pa(oaddr & IO_PAGE_MASK);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu->iommu_cur_ctx++;
+	/* Streaming IOPTEs only when the streaming cache is enabled;
+	 * device writes require IOPTE_WRITE.
+	 */
+	if (strbuf->strbuf_enabled)
+		iopte_protection = IOPTE_STREAMING(ctx);
+	else
+		iopte_protection = IOPTE_CONSISTENT(ctx);
+	if (direction != PCI_DMA_TODEVICE)
+		iopte_protection |= IOPTE_WRITE;
+
+	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
+		iopte_val(*base) = iopte_protection | base_paddr;
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	return PCI_DMA_ERROR_CODE;
+}
+
+/* Unmap a single streaming mode DMA translation made by
+ * pci_map_single().  Flushes the streaming buffer (by context when
+ * possible, else page by page), dummies the first TSB entry, and
+ * returns the cluster to the allocator.
+ */
+void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	iopte_t *base;
+	unsigned long flags, npages, i, ctx;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+	base = iommu->page_table +
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+#ifdef DEBUG_PCI_IOMMU
+	/* Casts added: dma_addr_t/size_t/void * do not match the
+	 * %08x/%08x/%016lx conversions on this 64-bit platform.
+	 */
+	if (IOPTE_IS_DUMMY(iommu, base))
+		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
+		       (unsigned int) bus_addr, (unsigned int) sz,
+		       (unsigned long) __builtin_return_address(0));
+#endif
+	bus_addr &= IO_PAGE_MASK;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* Record the context, if any. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+
+	/* Step 1: Kick data out of streaming buffers if necessary. */
+	if (strbuf->strbuf_enabled) {
+		u32 vaddr = bus_addr;
+
+		PCI_STC_FLUSHFLAG_INIT(strbuf);
+		if (strbuf->strbuf_ctxflush &&
+		    iommu->iommu_ctxflush) {
+			unsigned long matchreg, flushreg;
+
+			/* Flush by context; repeat until the match
+			 * register reports no remaining entries.
+			 */
+			flushreg = strbuf->strbuf_ctxflush;
+			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+			do {
+				pci_iommu_write(flushreg, ctx);
+			} while(((long)pci_iommu_read(matchreg)) < 0L);
+		} else {
+			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+		}
+
+		/* Synchronize: request a flush-flag write and spin until
+		 * the hardware sets it.
+		 */
+		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+		(void) pci_iommu_read(iommu->write_complete_reg);
+		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+			membar("#LoadLoad");
+	}
+
+	/* Step 2: Clear out first TSB entry. */
+	iopte_make_dummy(iommu, base);
+
+	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
+			       npages, ctx);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Physical address of the data described by scatterlist entry SG. */
+#define SG_ENT_PHYS_ADDRESS(SG)	\
+	(__pa(page_address((SG)->page)) + (SG)->offset)
+
+/* Populate IOPTEs for a prepared scatterlist.  The first NUSED entries
+ * of SG carry coalesced DMA segments (dma_address/dma_length already
+ * filled in); NELEMS is the count of original entries.  Each DMA
+ * segment may cover several original entries, so the two lists are
+ * walked in lock-step.
+ */
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
+			   int nused, int nelems, unsigned long iopte_protection)
+{
+	struct scatterlist *dma_sg = sg;
+	struct scatterlist *sg_end = sg + nelems;
+	int i;
+
+	for (i = 0; i < nused; i++) {
+		/* pteval == ~0UL means "no current page"; any real
+		 * physical address will differ from it page-wise.
+		 */
+		unsigned long pteval = ~0UL;
+		u32 dma_npages;
+
+		/* Number of IO pages this DMA segment spans, including
+		 * its leading sub-page offset.
+		 */
+		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+			      dma_sg->dma_length +
+			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+		do {
+			unsigned long offset;
+			signed int len;
+
+			/* If we are here, we know we have at least one
+			 * more page to map.  So walk forward until we
+			 * hit a page crossing, and begin creating new
+			 * mappings from that spot.
+			 */
+			for (;;) {
+				unsigned long tmp;
+
+				tmp = SG_ENT_PHYS_ADDRESS(sg);
+				len = sg->length;
+				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = tmp & IO_PAGE_MASK;
+					offset = tmp & (IO_PAGE_SIZE - 1UL);
+					break;
+				}
+				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+					offset = 0UL;
+					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+					break;
+				}
+				sg++;
+			}
+
+			/* Emit one IOPTE per page covered by this run. */
+			pteval = iopte_protection | (pteval & IOPTE_PAGE);
+			while (len > 0) {
+				*iopte++ = __iopte(pteval);
+				pteval += IO_PAGE_SIZE;
+				len -= (IO_PAGE_SIZE - offset);
+				offset = 0;
+				dma_npages--;
+			}
+
+			pteval = (pteval & IOPTE_PAGE) + len;
+			sg++;
+
+			/* Skip over any tail mappings we've fully mapped,
+			 * adjusting pteval along the way.  Stop when we
+			 * detect a page crossing event.
+			 */
+			while (sg < sg_end &&
+			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+			       ((pteval ^
+				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+				pteval += sg->length;
+				sg++;
+			}
+			/* Landed exactly on a page boundary: reset to
+			 * the "no current page" sentinel.
+			 */
+			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+				pteval = ~0UL;
+		} while (dma_npages != 0);
+		dma_sg++;
+	}
+}
+
+/* Map a set of buffers described by SGLIST with NELEMS array
+ * elements in streaming mode for PCI DMA.  Returns the number of
+ * coalesced DMA segments, or PCI_DMA_ERROR_CODE on failure.
+ * When making changes here, inspect the assembly output. I was having
+ * hard time to keep this routine out of using stack slots for holding variables.
+ */
+int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx, npages, iopte_protection;
+	iopte_t *base;
+	u32 dma_base;
+	struct scatterlist *sgtmp;
+	int used;
+
+	/* Fast path single entry scatterlists. */
+	if (nelems == 1) {
+		sglist->dma_address =
+			pci_map_single(pdev,
+				       (page_address(sglist->page) + sglist->offset),
+				       sglist->length, direction);
+		sglist->dma_length = sglist->length;
+		return 1;
+	}
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+	
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	/* Step 1: Prepare scatter list. */
+
+	npages = prepare_sg(sglist, nelems);
+
+	/* Step 2: Allocate a cluster. */
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	base = alloc_streaming_cluster(iommu, npages);
+	if (base == NULL)
+		goto bad;
+	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+	/* Step 3: Normalize DMA addresses.  prepare_sg() left relative
+	 * offsets in dma_address; rebase them onto the cluster, and
+	 * count how many coalesced segments are in use.
+	 */
+	used = nelems;
+
+	sgtmp = sglist;
+	while (used && sgtmp->dma_length) {
+		sgtmp->dma_address += dma_base;
+		sgtmp++;
+		used--;
+	}
+	used = nelems - used;
+
+	/* Step 4: Choose a context if necessary. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu->iommu_cur_ctx++;
+
+	/* Step 5: Create the mappings. */
+	if (strbuf->strbuf_enabled)
+		iopte_protection = IOPTE_STREAMING(ctx);
+	else
+		iopte_protection = IOPTE_CONSISTENT(ctx);
+	if (direction != PCI_DMA_TODEVICE)
+		iopte_protection |= IOPTE_WRITE;
+	fill_sg (base, sglist, used, nelems, iopte_protection);
+#ifdef VERIFY_SG
+	verify_sglist(sglist, nelems, base, npages);
+#endif
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return used;
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	return PCI_DMA_ERROR_CODE;
+}
+
+/* Unmap a set of streaming mode DMA translations made by
+ * pci_map_sg().  Mirrors pci_unmap_single(): flush the streaming
+ * buffer, dummy the first TSB entry, release the cluster.
+ */
+void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	iopte_t *base;
+	unsigned long flags, ctx, i, npages;
+	u32 bus_addr;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+	
+	bus_addr = sglist->dma_address & IO_PAGE_MASK;
+
+	/* Find the last used segment (dma_length == 0 terminates the
+	 * coalesced list) to compute the total page span.
+	 */
+	for (i = 1; i < nelems; i++)
+		if (sglist[i].dma_length == 0)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+
+	base = iommu->page_table +
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+#ifdef DEBUG_PCI_IOMMU
+	/* NOTE(review): the %016lx/%d conversions don't match the
+	 * dma_addr_t and void * argument types here — confirm before
+	 * relying on this debug output.
+	 */
+	if (IOPTE_IS_DUMMY(iommu, base))
+		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
+#endif
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* Record the context, if any. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+
+	/* Step 1: Kick data out of streaming buffers if necessary. */
+	if (strbuf->strbuf_enabled) {
+		u32 vaddr = (u32) bus_addr;
+
+		PCI_STC_FLUSHFLAG_INIT(strbuf);
+		if (strbuf->strbuf_ctxflush &&
+		    iommu->iommu_ctxflush) {
+			unsigned long matchreg, flushreg;
+
+			/* Flush by context until the match register
+			 * reports no remaining entries.
+			 */
+			flushreg = strbuf->strbuf_ctxflush;
+			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+			do {
+				pci_iommu_write(flushreg, ctx);
+			} while(((long)pci_iommu_read(matchreg)) < 0L);
+		} else {
+			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+		}
+
+		/* Synchronize: request a flush-flag write and spin until
+		 * the hardware sets it.
+		 */
+		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+		(void) pci_iommu_read(iommu->write_complete_reg);
+		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+			membar("#LoadLoad");
+	}
+
+	/* Step 2: Clear out first TSB entry. */
+	iopte_make_dummy(iommu, base);
+
+	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
+			       npages, ctx);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer: push any data held
+ * in the streaming buffer out to memory so the CPU sees it.
+ */
+void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx, npages;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	/* Nothing to do when the streaming cache is disabled. */
+	if (!strbuf->strbuf_enabled)
+		return;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+	bus_addr &= IO_PAGE_MASK;
+
+	/* Step 1: Record the context, if any. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		iopte_t *iopte;
+
+		iopte = iommu->page_table +
+			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+	}
+
+	/* Step 2: Kick data out of streaming buffers. */
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		/* Flush by context until the match register reports no
+		 * remaining entries.
+		 */
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+		do {
+			pci_iommu_write(flushreg, ctx);
+		} while(((long)pci_iommu_read(matchreg)) < 0L);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
+	}
+
+	/* Step 3: Perform flush synchronization sequence. */
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+		membar("#LoadLoad");
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.  Scatterlist analogue of
+ * pci_dma_sync_single_for_cpu().
+ */
+void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	/* Nothing to do when the streaming cache is disabled. */
+	if (!strbuf->strbuf_enabled)
+		return;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* Step 1: Record the context, if any. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		iopte_t *iopte;
+
+		iopte = iommu->page_table +
+			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+	}
+
+	/* Step 2: Kick data out of streaming buffers. */
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		/* Flush by context until the match register reports no
+		 * remaining entries.
+		 */
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+		do {
+			pci_iommu_write(flushreg, ctx);
+		} while (((long)pci_iommu_read(matchreg)) < 0L);
+	} else {
+		unsigned long i, npages;
+		u32 bus_addr;
+
+		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+
+		/* Find the last used segment (dma_length == 0
+		 * terminates the coalesced list) to size the flush.
+		 */
+		for(i = 1; i < nelems; i++)
+			if (!sglist[i].dma_length)
+				break;
+		i--;
+		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
+	}
+
+	/* Step 3: Perform flush synchronization sequence. */
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+		membar("#LoadLoad");
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Toggle the ALI M1533 ISA bridge register that controls how bit 31
+ * of a 31-bit DMA address is emitted, for the benefit of ALI sound
+ * chips behind it.
+ */
+static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+{
+	struct pci_dev *ali_isa_bridge;
+	u8 val;
+
+	/* ALI sound chips generate 31-bits of DMA, a special register
+	 * determines what bit 31 is emitted as.
+	 */
+	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
+					 PCI_DEVICE_ID_AL_M1533,
+					 NULL);
+	/* Guard against a missing bridge; the original dereferenced
+	 * the lookup result unconditionally.
+	 */
+	if (!ali_isa_bridge)
+		return;
+
+	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
+	if (set_bit)
+		val |= 0x01;
+	else
+		val &= ~0x01;
+	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
+	/* pci_get_device() took a reference; drop it. */
+	pci_dev_put(ali_isa_bridge);
+}
+
+/* Report whether DEVICE_MASK is compatible with the DMA addressing
+ * constraints of PDEV's IOMMU (or with a plain 32-bit mask when PDEV
+ * is NULL).  Returns non-zero when supported.
+ */
+int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+{
+	u64 dma_addr_mask;
+
+	if (pdev == NULL) {
+		dma_addr_mask = 0xffffffff;
+	} else {
+		/* Read sysdata only after the NULL check above; the
+		 * original dereferenced pdev in the declarator before
+		 * testing it for NULL.
+		 */
+		struct pcidev_cookie *pcp = pdev->sysdata;
+		struct pci_iommu *iommu = pcp->pbm->iommu;
+
+		dma_addr_mask = iommu->dma_addr_mask;
+
+		if (pdev->vendor == PCI_VENDOR_ID_AL &&
+		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
+		    device_mask == 0x7fffffff) {
+			ali_sound_dma_hack(pdev,
+					   (dma_addr_mask & 0x80000000) != 0);
+			return 1;
+		}
+	}
+
+	/* Masks wider than 32 bits are never supported here. */
+	if (device_mask >= (1UL << 32UL))
+		return 0;
+
+	return (device_mask & dma_addr_mask) == dma_addr_mask;
+}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
new file mode 100644
index 0000000..3567fa8
--- /dev/null
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -0,0 +1,1560 @@
+/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
+ * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek   (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/starfire.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All PSYCHO registers are 64-bits.  The following accessor
+ * routines are how they are accessed.  The REG parameter
+ * is a physical address.
+ */
+#define psycho_read(__reg) \
+({	u64 __ret; \
+	__asm__ __volatile__("ldxa [%1] %2, %0" \
+			     : "=r" (__ret) \
+			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory"); \
+	__ret; \
+})
+#define psycho_write(__reg, __val) \
+	__asm__ __volatile__("stxa %0, [%1] %2" \
+			     : /* no outputs */ \
+			     : "r" (__val), "r" (__reg), \
+			       "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory")
+
+/* Misc. PSYCHO PCI controller register offsets and definitions. */
+#define PSYCHO_CONTROL		0x0010UL
+#define  PSYCHO_CONTROL_IMPL	 0xf000000000000000UL /* Implementation of this PSYCHO*/
+#define  PSYCHO_CONTROL_VER	 0x0f00000000000000UL /* Version of this PSYCHO       */
+#define  PSYCHO_CONTROL_MID	 0x00f8000000000000UL /* UPA Module ID of PSYCHO      */
+#define  PSYCHO_CONTROL_IGN	 0x0007c00000000000UL /* Interrupt Group Number       */
+#define  PSYCHO_CONTROL_RESV     0x00003ffffffffff0UL /* Reserved                     */
+#define  PSYCHO_CONTROL_APCKEN	 0x0000000000000008UL /* Address Parity Check Enable  */
+#define  PSYCHO_CONTROL_APERR	 0x0000000000000004UL /* Incoming System Addr Parerr  */
+#define  PSYCHO_CONTROL_IAP	 0x0000000000000002UL /* Invert UPA Parity            */
+#define  PSYCHO_CONTROL_MODE	 0x0000000000000001UL /* PSYCHO clock mode            */
+#define PSYCHO_PCIA_CTRL	0x2000UL
+#define PSYCHO_PCIB_CTRL	0x4000UL
+#define  PSYCHO_PCICTRL_RESV1	 0xfffffff000000000UL /* Reserved                     */
+#define  PSYCHO_PCICTRL_SBH_ERR	 0x0000000800000000UL /* Streaming byte hole error    */
+#define  PSYCHO_PCICTRL_SERR	 0x0000000400000000UL /* SERR signal asserted         */
+#define  PSYCHO_PCICTRL_SPEED	 0x0000000200000000UL /* PCI speed (1 is U2P clock)   */
+#define  PSYCHO_PCICTRL_RESV2	 0x00000001ffc00000UL /* Reserved                     */
+#define  PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking      */
+#define  PSYCHO_PCICTRL_RESV3	 0x00000000001ff800UL /* Reserved                     */
+#define  PSYCHO_PCICTRL_SBH_INT	 0x0000000000000400UL /* Streaming byte hole int enab */
+#define  PSYCHO_PCICTRL_WEN	 0x0000000000000200UL /* Power Mgmt Wake Enable       */
+#define  PSYCHO_PCICTRL_EEN	 0x0000000000000100UL /* PCI Error Interrupt Enable   */
+#define  PSYCHO_PCICTRL_RESV4	 0x00000000000000c0UL /* Reserved                     */
+#define  PSYCHO_PCICTRL_AEN	 0x000000000000003fUL /* PCI DVMA Arbitration Enable  */
+
+/* U2P Programmer's Manual, page 13-55, configuration space
+ * address format:
+ * 
+ *  32             24 23 16 15    11 10       8 7   2  1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define PSYCHO_CONFIG_BASE(PBM)	\
+	((PBM)->config_space | (1UL << 24))
+#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG)	\
+	(((unsigned long)(BUS)   << 16) |	\
+	 ((unsigned long)(DEVFN) << 8)  |	\
+	 ((unsigned long)(REG)))
+
+/* Compute the physical address used to reach config-space register
+ * WHERE of (bus, devfn) behind PBM.  Returns NULL when no PBM is
+ * supplied.
+ */
+static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
+				      unsigned char bus,
+				      unsigned int devfn,
+				      int where)
+{
+	unsigned long cfg_addr;
+
+	if (pbm == NULL)
+		return NULL;
+
+	cfg_addr  = PSYCHO_CONFIG_BASE(pbm);
+	cfg_addr |= PSYCHO_CONFIG_ENCODE(bus, devfn, where);
+
+	return (void *) cfg_addr;
+}
+
+/* Non-zero when a config access to (bus, devfn) must not be issued:
+ * either PBM has no parent controller, or the access targets a slot
+ * above 8 on the PBM's first bus (no such slots exist on either PBM).
+ */
+static int psycho_out_of_range(struct pci_pbm_info *pbm,
+			       unsigned char bus,
+			       unsigned char devfn)
+{
+	if (pbm->parent == 0)
+		return 1;
+
+	if (bus == pbm->pci_first_busno &&
+	    PCI_SLOT(devfn) > 8 &&
+	    (pbm == &pbm->parent->pbm_A ||
+	     pbm == &pbm->parent->pbm_B))
+		return 1;
+
+	return 0;
+}
+
+/* PSYCHO PCI configuration space accessors. */
+
+/* Read a 1/2/4-byte config-space register WHERE of (bus, devfn).
+ * *VALUE is preloaded with all-ones (the "no device" pattern) so
+ * that out-of-range or misaligned requests read back as absent;
+ * every path returns PCIBIOS_SUCCESSFUL.
+ */
+static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+			       int where, int size, u32 *value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+	u16 tmp16;
+	u8 tmp8;
+
+	/* Preload the "device absent" pattern for this access width. */
+	switch (size) {
+	case 1:
+		*value = 0xff;
+		break;
+	case 2:
+		*value = 0xffff;
+		break;
+	case 4:
+		*value = 0xffffffff;
+		break;
+	}
+
+	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	/* Skip accesses flagged out of range for this PBM. */
+	if (psycho_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+	switch (size) {
+	case 1:
+		pci_config_read8((u8 *)addr, &tmp8);
+		*value = (u32) tmp8;
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_read_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read16((u16 *)addr, &tmp16);
+		*value = (u32) tmp16;
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_read_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read32(addr, value);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write SIZE bytes of VALUE to config-space register WHERE of
+ * (bus, devfn).  Out-of-range or misaligned requests are silently
+ * dropped; every path returns PCIBIOS_SUCCESSFUL.
+ */
+static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				int where, int size, u32 value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+
+	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	/* Skip accesses flagged out of range for this PBM. */
+	if (psycho_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		pci_config_write8((u8 *)addr, value);
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_write_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write16((u16 *)addr, value);
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_write_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		/* Last case: no break needed before the switch ends. */
+		pci_config_write32(addr, value);
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors handed to the generic PCI layer. */
+static struct pci_ops psycho_ops = {
+	.read =		psycho_read_pci_cfg,
+	.write =	psycho_write_pci_cfg,
+};
+
+/* PSYCHO interrupt mapping support. */
+#define PSYCHO_IMAP_A_SLOT0	0x0c00UL
+#define PSYCHO_IMAP_B_SLOT0	0x0c20UL
+/* IMAP register offset for a PCI slot interrupt.  INO bit 4 selects
+ * the bus (0 = PBM A, 1 = PBM B) and bits 3:2 the slot; each slot's
+ * IMAP register is 8 bytes wide.
+ */
+static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
+{
+	unsigned long base;
+	unsigned int slot = (ino & 0x0c) >> 2;
+
+	base = (ino & 0x10) ? PSYCHO_IMAP_B_SLOT0 : PSYCHO_IMAP_A_SLOT0;
+
+	return base + (slot * 8);
+}
+
+#define PSYCHO_IMAP_SCSI	0x1000UL
+#define PSYCHO_IMAP_ETH		0x1008UL
+#define PSYCHO_IMAP_BPP		0x1010UL
+#define PSYCHO_IMAP_AU_REC	0x1018UL
+#define PSYCHO_IMAP_AU_PLAY	0x1020UL
+#define PSYCHO_IMAP_PFAIL	0x1028UL
+#define PSYCHO_IMAP_KMS		0x1030UL
+#define PSYCHO_IMAP_FLPY	0x1038UL
+#define PSYCHO_IMAP_SHW		0x1040UL
+#define PSYCHO_IMAP_KBD		0x1048UL
+#define PSYCHO_IMAP_MS		0x1050UL
+#define PSYCHO_IMAP_SER		0x1058UL
+#define PSYCHO_IMAP_TIM0	0x1060UL
+#define PSYCHO_IMAP_TIM1	0x1068UL
+#define PSYCHO_IMAP_UE		0x1070UL
+#define PSYCHO_IMAP_CE		0x1078UL
+#define PSYCHO_IMAP_A_ERR	0x1080UL
+#define PSYCHO_IMAP_B_ERR	0x1088UL
+#define PSYCHO_IMAP_PMGMT	0x1090UL
+#define PSYCHO_IMAP_GFX		0x1098UL
+#define PSYCHO_IMAP_EUPA	0x10a0UL
+
+/* IMAP register offsets for the fixed on-board interrupt sources,
+ * indexed by (INO - PSYCHO_ONBOARD_IRQ_BASE).
+ */
+static unsigned long __onboard_imap_off[] = {
+/*0x20*/	PSYCHO_IMAP_SCSI,
+/*0x21*/	PSYCHO_IMAP_ETH,
+/*0x22*/	PSYCHO_IMAP_BPP,
+/*0x23*/	PSYCHO_IMAP_AU_REC,
+/*0x24*/	PSYCHO_IMAP_AU_PLAY,
+/*0x25*/	PSYCHO_IMAP_PFAIL,
+/*0x26*/	PSYCHO_IMAP_KMS,
+/*0x27*/	PSYCHO_IMAP_FLPY,
+/*0x28*/	PSYCHO_IMAP_SHW,
+/*0x29*/	PSYCHO_IMAP_KBD,
+/*0x2a*/	PSYCHO_IMAP_MS,
+/*0x2b*/	PSYCHO_IMAP_SER,
+/*0x2c*/	PSYCHO_IMAP_TIM0,
+/*0x2d*/	PSYCHO_IMAP_TIM1,
+/*0x2e*/	PSYCHO_IMAP_UE,
+/*0x2f*/	PSYCHO_IMAP_CE,
+/*0x30*/	PSYCHO_IMAP_A_ERR,
+/*0x31*/	PSYCHO_IMAP_B_ERR,
+/*0x32*/	PSYCHO_IMAP_PMGMT
+};
+#define PSYCHO_ONBOARD_IRQ_BASE		0x20
+#define PSYCHO_ONBOARD_IRQ_LAST		0x32
+#define psycho_onboard_imap_offset(__ino) \
+	__onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
+
+#define PSYCHO_ICLR_A_SLOT0	0x1400UL
+#define PSYCHO_ICLR_SCSI	0x1800UL
+
+/* ICLR register offset for INO: on-board sources (bit 5 set) use the
+ * SCSI bank, PCI slot interrupts the slot-0 bank.  Every use of the
+ * macro argument is parenthesized so expansion stays correct for
+ * expression arguments (the original left the first `ino` bare).
+ */
+#define psycho_iclr_offset(ino)					      \
+	(((ino) & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) :  \
+			(PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f) << 3)))
+
+/* PCI PSYCHO INO number to Sparc PIL level.  A zero entry means the
+ * PIL is chosen at runtime from the device's PCI base class, see
+ * psycho_ino_to_pil().
+ */
+static unsigned char psycho_pil_table[] = {
+/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
+/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
+/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
+/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
+/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
+/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
+/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
+/*0x20*/4,		/* SCSI				*/
+/*0x21*/5,		/* Ethernet			*/
+/*0x22*/8,		/* Parallel Port		*/
+/*0x23*/13,		/* Audio Record			*/
+/*0x24*/14,		/* Audio Playback		*/
+/*0x25*/15,		/* PowerFail			*/
+/*0x26*/4,		/* second SCSI			*/
+/*0x27*/11,		/* Floppy			*/
+/*0x28*/4,		/* Spare Hardware		*/
+/*0x29*/9,		/* Keyboard			*/
+/*0x2a*/4,		/* Mouse			*/
+/*0x2b*/12,		/* Serial			*/
+/*0x2c*/10,		/* Timer 0			*/
+/*0x2d*/11,		/* Timer 1			*/
+/*0x2e*/15,		/* Uncorrectable ECC		*/
+/*0x2f*/15,		/* Correctable ECC		*/
+/*0x30*/15,		/* PCI Bus A Error		*/
+/*0x31*/15,		/* PCI Bus B Error		*/
+/*0x32*/15,		/* Power Management		*/
+};
+
+/* Map INO to the Sparc PIL it should be delivered at.  Fixed
+ * on-board sources come straight from psycho_pil_table[]; PCI slot
+ * interrupts (table entry 0) are prioritized by the device's PCI
+ * base class, defaulting to 4 when no device is known.  Also drops
+ * the stray null statement (";") the original left after the switch.
+ */
+static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int ret;
+
+	ret = psycho_pil_table[ino];
+	if (ret == 0 && pdev == NULL) {
+		ret = 4;
+	} else if (ret == 0) {
+		switch ((pdev->class >> 16) & 0xff) {
+		case PCI_BASE_CLASS_STORAGE:
+			ret = 4;
+			break;
+
+		case PCI_BASE_CLASS_NETWORK:
+			ret = 6;
+			break;
+
+		case PCI_BASE_CLASS_DISPLAY:
+			ret = 9;
+			break;
+
+		case PCI_BASE_CLASS_MULTIMEDIA:
+		case PCI_BASE_CLASS_MEMORY:
+		case PCI_BASE_CLASS_BRIDGE:
+		case PCI_BASE_CLASS_SERIAL:
+			ret = 10;
+			break;
+
+		default:
+			ret = 4;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Build the IRQ bucket for interrupt INO on PBM's controller and
+ * return the corresponding IRQ cookie.  PDEV, when non-NULL, refines
+ * the PIL choice for PCI slot interrupts.  Unknown INOs halt at the
+ * PROM.
+ */
+static unsigned int __init psycho_irq_build(struct pci_pbm_info *pbm,
+					    struct pci_dev *pdev,
+					    unsigned int ino)
+{
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr;
+	unsigned long imap_off, iclr_off;
+	int pil, inofixup = 0;
+
+	ino &= PCI_IRQ_INO;
+	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
+		/* PCI slot */
+		imap_off = psycho_pcislot_imap_offset(ino);
+	} else {
+		/* Onboard device */
+		if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
+			prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
+			prom_halt();
+		}
+		imap_off = psycho_onboard_imap_offset(ino);
+	}
+
+	/* Now build the IRQ bucket. */
+	pil = psycho_ino_to_pil(pdev, ino);
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	imap = pbm->controller_regs + imap_off;
+	/* NOTE(review): the +4 looks like it addresses the low 32 bits
+	 * of the 64-bit IMAP/ICLR registers for 32-bit accessors in
+	 * build_irq() -- confirm against the IRQ layer.
+	 */
+	imap += 4;
+
+	iclr_off = psycho_iclr_offset(ino);
+	iclr = pbm->controller_regs + iclr_off;
+	iclr += 4;
+
+	/* PCI slot interrupts (bit 5 clear) carry INTA..INTD in the
+	 * low two INO bits; pass that along as the fixup.
+	 */
+	if ((ino & 0x20) == 0)
+		inofixup = ino & 0x03;
+
+	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	return __irq(bucket);
+}
+
+/* PSYCHO error handling support. */
+enum psycho_error_type {
+	UE_ERR, CE_ERR, PCI_ERR
+};
+
+/* Helper function of IOMMU error checking, which checks out
+ * the state of the streaming buffers.  The IOMMU lock is
+ * held when this is called.
+ *
+ * For the PCI error case we know which PBM (and thus which
+ * streaming buffer) caused the error, but for the uncorrectable
+ * error case we do not.  So we always check both streaming caches.
+ */
+#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
+#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
+#define  PSYCHO_STRBUF_CTRL_LPTR    0x00000000000000f0UL /* LRU Lock Pointer */
+#define  PSYCHO_STRBUF_CTRL_LENAB   0x0000000000000008UL /* LRU Lock Enable */
+#define  PSYCHO_STRBUF_CTRL_RRDIS   0x0000000000000004UL /* Rerun Disable */
+#define  PSYCHO_STRBUF_CTRL_DENAB   0x0000000000000002UL /* Diagnostic Mode Enable */
+#define  PSYCHO_STRBUF_CTRL_ENAB    0x0000000000000001UL /* Streaming Buffer Enable */
+#define PSYCHO_STRBUF_FLUSH_A   0x2808UL
+#define PSYCHO_STRBUF_FLUSH_B   0x4808UL
+#define PSYCHO_STRBUF_FSYNC_A   0x2810UL
+#define PSYCHO_STRBUF_FSYNC_B   0x4810UL
+#define PSYCHO_STC_DATA_A	0xb000UL
+#define PSYCHO_STC_DATA_B	0xc000UL
+#define PSYCHO_STC_ERR_A	0xb400UL
+#define PSYCHO_STC_ERR_B	0xc400UL
+#define  PSYCHO_STCERR_WRITE	 0x0000000000000002UL	/* Write Error */
+#define  PSYCHO_STCERR_READ	 0x0000000000000001UL	/* Read Error */
+#define PSYCHO_STC_TAG_A	0xb800UL
+#define PSYCHO_STC_TAG_B	0xc800UL
+#define  PSYCHO_STCTAG_PPN	 0x0fffffff00000000UL	/* Physical Page Number */
+#define  PSYCHO_STCTAG_VPN	 0x00000000ffffe000UL	/* Virtual Page Number */
+#define  PSYCHO_STCTAG_VALID	 0x0000000000000002UL	/* Valid */
+#define  PSYCHO_STCTAG_WRITE	 0x0000000000000001UL	/* Writable */
+#define PSYCHO_STC_LINE_A	0xb900UL
+#define PSYCHO_STC_LINE_B	0xc900UL
+#define  PSYCHO_STCLINE_LINDX	 0x0000000001e00000UL	/* LRU Index */
+#define  PSYCHO_STCLINE_SPTR	 0x00000000001f8000UL	/* Dirty Data Start Pointer */
+#define  PSYCHO_STCLINE_LADDR	 0x0000000000007f00UL	/* Line Address */
+#define  PSYCHO_STCLINE_EPTR	 0x00000000000000fcUL	/* Dirty Data End Pointer */
+#define  PSYCHO_STCLINE_VALID	 0x0000000000000002UL	/* Valid */
+#define  PSYCHO_STCLINE_FOFN	 0x0000000000000001UL	/* Fetch Outstanding / Flush Necessary */
+
+static DEFINE_SPINLOCK(stc_buf_lock);
+static unsigned long stc_error_buf[128];
+static unsigned long stc_tag_buf[16];
+static unsigned long stc_line_buf[16];
+
+/* Snapshot and report one streaming cache's error/tag/line state,
+ * clearing it as we go.  Called with the IOMMU lock held;
+ * stc_buf_lock serializes use of the static snapshot buffers above.
+ * Note that both PBMs' STC registers are addressed relative to
+ * pbm_A's controller register block, with per-PBM offsets.
+ */
+static void __psycho_check_one_stc(struct pci_controller_info *p,
+				   struct pci_pbm_info *pbm,
+				   int is_pbm_a)
+{
+	struct pci_strbuf *strbuf = &pbm->stc;
+	unsigned long regbase = p->pbm_A.controller_regs;
+	unsigned long err_base, tag_base, line_base;
+	u64 control;
+	int i;
+
+	if (is_pbm_a) {
+		err_base = regbase + PSYCHO_STC_ERR_A;
+		tag_base = regbase + PSYCHO_STC_TAG_A;
+		line_base = regbase + PSYCHO_STC_LINE_A;
+	} else {
+		err_base = regbase + PSYCHO_STC_ERR_B;
+		tag_base = regbase + PSYCHO_STC_TAG_B;
+		line_base = regbase + PSYCHO_STC_LINE_B;
+	}
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the
+	 * streaming buffer into diagnostic mode to probe
+	 * it's tags and error status, we _must_ clear all
+	 * of the line tag valid bits before re-enabling
+	 * the streaming buffer.  If any dirty data lives
+	 * in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach
+	 * main memory.
+	 */
+	control = psycho_read(strbuf->strbuf_control);
+	psycho_write(strbuf->strbuf_control,
+		     (control | PSYCHO_STRBUF_CTRL_DENAB));
+	/* Capture then zero the 128 error words and the 16 tag/line
+	 * pairs while still in diagnostic mode.
+	 */
+	for (i = 0; i < 128; i++) {
+		unsigned long val;
+
+		val = psycho_read(err_base + (i * 8UL));
+		psycho_write(err_base + (i * 8UL), 0UL);
+		stc_error_buf[i] = val;
+	}
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = psycho_read(tag_base + (i * 8UL));
+		stc_line_buf[i] = psycho_read(line_base + (i * 8UL));
+		psycho_write(tag_base + (i * 8UL), 0UL);
+		psycho_write(line_base + (i * 8UL), 0UL);
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	psycho_write(strbuf->strbuf_control, control);
+
+	/* Report each of the 16 lines whose 8 error words show any
+	 * non-zero state, decoding its tag and line registers.
+	 */
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			unsigned long errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk("PSYCHO%d(PBM%c): STC_ERR(%d)[wr(%d)rd(%d)]\n",
+				       p->index,
+				       (is_pbm_a ? 'A' : 'B'),
+				       j,
+				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
+				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			unsigned long tagval = stc_tag_buf[i];
+			unsigned long lineval = stc_line_buf[i];
+			printk("PSYCHO%d(PBM%c): STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)W(%d)]\n",
+			       p->index,
+			       (is_pbm_a ? 'A' : 'B'),
+			       i,
+			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
+			       (tagval & PSYCHO_STCTAG_VPN),
+			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
+			printk("PSYCHO%d(PBM%c): STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+			       "V(%d)FOFN(%d)]\n",
+			       p->index,
+			       (is_pbm_a ? 'A' : 'B'),
+			       i,
+			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
+			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
+			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
+			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
+			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
+		}
+	}
+
+	spin_unlock(&stc_buf_lock);
+}
+
+static void __psycho_check_stc_error(struct pci_controller_info *p,
+				     unsigned long afsr,
+				     unsigned long afar,
+				     enum psycho_error_type type)
+{
+	struct pci_pbm_info *pbm;
+
+	pbm = &p->pbm_A;
+	if (pbm->stc.strbuf_enabled)
+		__psycho_check_one_stc(p, pbm, 1);
+
+	pbm = &p->pbm_B;
+	if (pbm->stc.strbuf_enabled)
+		__psycho_check_one_stc(p, pbm, 0);
+}
+
+/* When an Uncorrectable Error or a PCI Error happens, we
+ * interrogate the IOMMU state to see if it is the cause.
+ */
+#define PSYCHO_IOMMU_CONTROL	0x0200UL
+#define  PSYCHO_IOMMU_CTRL_RESV     0xfffffffff9000000UL /* Reserved                      */
+#define  PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status      */
+#define  PSYCHO_IOMMU_CTRL_XLTEERR  0x0000000001000000UL /* Translation Error encountered */
+#define  PSYCHO_IOMMU_CTRL_LCKEN    0x0000000000800000UL /* Enable translation locking    */
+#define  PSYCHO_IOMMU_CTRL_LCKPTR   0x0000000000780000UL /* Translation lock pointer      */
+#define  PSYCHO_IOMMU_CTRL_TSBSZ    0x0000000000070000UL /* TSB Size                      */
+#define  PSYCHO_IOMMU_TSBSZ_1K      0x0000000000000000UL /* TSB Table 1024 8-byte entries */
+#define  PSYCHO_IOMMU_TSBSZ_2K      0x0000000000010000UL /* TSB Table 2048 8-byte entries */
+#define  PSYCHO_IOMMU_TSBSZ_4K      0x0000000000020000UL /* TSB Table 4096 8-byte entries */
+#define  PSYCHO_IOMMU_TSBSZ_8K      0x0000000000030000UL /* TSB Table 8192 8-byte entries */
+#define  PSYCHO_IOMMU_TSBSZ_16K     0x0000000000040000UL /* TSB Table 16k 8-byte entries  */
+#define  PSYCHO_IOMMU_TSBSZ_32K     0x0000000000050000UL /* TSB Table 32k 8-byte entries  */
+#define  PSYCHO_IOMMU_TSBSZ_64K     0x0000000000060000UL /* TSB Table 64k 8-byte entries  */
+#define  PSYCHO_IOMMU_TSBSZ_128K    0x0000000000070000UL /* TSB Table 128k 8-byte entries */
+#define  PSYCHO_IOMMU_CTRL_RESV2    0x000000000000fff8UL /* Reserved                      */
+#define  PSYCHO_IOMMU_CTRL_TBWSZ    0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
+#define  PSYCHO_IOMMU_CTRL_DENAB    0x0000000000000002UL /* Diagnostic mode enable        */
+#define  PSYCHO_IOMMU_CTRL_ENAB     0x0000000000000001UL /* IOMMU Enable                  */
+#define PSYCHO_IOMMU_TSBBASE	0x0208UL
+#define PSYCHO_IOMMU_FLUSH	0x0210UL
+#define PSYCHO_IOMMU_TAG	0xa580UL
+#define  PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
+#define  PSYCHO_IOMMU_TAG_ERR	 (0x1UL << 22UL)
+#define  PSYCHO_IOMMU_TAG_WRITE	 (0x1UL << 21UL)
+#define  PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
+#define  PSYCHO_IOMMU_TAG_SIZE	 (0x1UL << 19UL)
+#define  PSYCHO_IOMMU_TAG_VPAGE	 0x7ffffUL
+#define PSYCHO_IOMMU_DATA	0xa600UL
+#define  PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
+#define  PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
+#define  PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
+/* After a UE or PCI error, interrogate the IOMMU for translation
+ * error state, dump the TLB diagnostics if one occurred, then check
+ * the streaming caches.  Only pbm_A's iommu/register block is used;
+ * from the code it appears both PBMs share the one IOMMU -- confirm.
+ */
+static void psycho_check_iommu_error(struct pci_controller_info *p,
+				     unsigned long afsr,
+				     unsigned long afar,
+				     enum psycho_error_type type)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long iommu_tag[16];
+	unsigned long iommu_data[16];
+	unsigned long flags;
+	u64 control;
+	int i;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	control = psycho_read(iommu->iommu_control);
+	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
+		char *type_string;
+
+		/* Clear the error encountered bit. */
+		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
+		psycho_write(iommu->iommu_control, control);
+
+		/* Decode the 2-bit translation error status field. */
+		switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+		case 0:
+			type_string = "Protection Error";
+			break;
+		case 1:
+			type_string = "Invalid Error";
+			break;
+		case 2:
+			type_string = "TimeOut Error";
+			break;
+		case 3:
+		default:
+			type_string = "ECC Error";
+			break;
+		};
+		printk("PSYCHO%d: IOMMU Error, type[%s]\n",
+		       p->index, type_string);
+
+		/* Put the IOMMU into diagnostic mode and probe
+		 * it's TLB for entries with error status.
+		 *
+		 * It is very possible for another DVMA to occur
+		 * while we do this probe, and corrupt the system
+		 * further.  But we are so screwed at this point
+		 * that we are likely to crash hard anyways, so
+		 * get as much diagnostic information to the
+		 * console as we can.
+		 */
+		psycho_write(iommu->iommu_control,
+			     control | PSYCHO_IOMMU_CTRL_DENAB);
+		/* Snapshot, then clear, all 16 TLB tag/data pairs. */
+		for (i = 0; i < 16; i++) {
+			unsigned long base = p->pbm_A.controller_regs;
+
+			iommu_tag[i] =
+				psycho_read(base + PSYCHO_IOMMU_TAG + (i * 8UL));
+			iommu_data[i] =
+				psycho_read(base + PSYCHO_IOMMU_DATA + (i * 8UL));
+
+			/* Now clear out the entry. */
+			psycho_write(base + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
+			psycho_write(base + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
+		}
+
+		/* Leave diagnostic mode. */
+		psycho_write(iommu->iommu_control, control);
+
+		/* Report every snapshotted entry with its error bit set. */
+		for (i = 0; i < 16; i++) {
+			unsigned long tag, data;
+
+			tag = iommu_tag[i];
+			if (!(tag & PSYCHO_IOMMU_TAG_ERR))
+				continue;
+
+			data = iommu_data[i];
+			switch((tag & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
+			case 0:
+				type_string = "Protection Error";
+				break;
+			case 1:
+				type_string = "Invalid Error";
+				break;
+			case 2:
+				type_string = "TimeOut Error";
+				break;
+			case 3:
+			default:
+				type_string = "ECC Error";
+				break;
+			};
+			printk("PSYCHO%d: IOMMU TAG(%d)[error(%s) wr(%d) str(%d) sz(%dK) vpg(%08lx)]\n",
+			       p->index, i, type_string,
+			       ((tag & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
+			       ((tag & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
+			       ((tag & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
+			       (tag & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
+			printk("PSYCHO%d: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
+			       p->index, i,
+			       ((data & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
+			       ((data & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
+			       (data & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
+		}
+	}
+	__psycho_check_stc_error(p, afsr, afar, type);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Uncorrectable Errors.  Cause of the error and the address are
+ * recorded in the UE_AFSR and UE_AFAR of PSYCHO.  They are errors
+ * relating to UPA interface transactions.
+ */
+#define PSYCHO_UE_AFSR	0x0030UL
+#define  PSYCHO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
+#define  PSYCHO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
+#define  PSYCHO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
+#define  PSYCHO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
+#define  PSYCHO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
+#define  PSYCHO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
+#define  PSYCHO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved                     */
+#define  PSYCHO_UEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define  PSYCHO_UEAFSR_DOFF	0x00000000e0000000UL /* Doubleword Offset            */
+#define  PSYCHO_UEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
+#define  PSYCHO_UEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
+#define  PSYCHO_UEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
+#define PSYCHO_UE_AFAR	0x0038UL
+
+/* Interrupt handler for UPA uncorrectable errors.  Latches the UE
+ * AFAR/AFSR pair, clears the primary/secondary cause bits, logs
+ * everything, then interrogates the IOMMU and streaming caches.
+ * Returns IRQ_NONE when no recognized cause bit was set.
+ */
+static irqreturn_t psycho_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch uncorrectable error status. */
+	afar = psycho_read(afar_reg);
+	afsr = psycho_read(afsr_reg);
+
+	/* Clear the primary/secondary error status bits. */
+	error_bits = afsr &
+		(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
+		 PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
+	if (!error_bits)
+		return IRQ_NONE;
+	psycho_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PSYCHO%d: Uncorrectable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & PSYCHO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & PSYCHO_UEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & PSYCHO_UEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+	printk("PSYCHO%d: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
+	       (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
+	       (afsr & PSYCHO_UEAFSR_MID) >> 24UL,
+	       ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
+	printk("PSYCHO%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("PSYCHO%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & PSYCHO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & PSYCHO_UEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & PSYCHO_UEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	psycho_check_iommu_error(p, afsr, afar, UE_ERR);
+
+	return IRQ_HANDLED;
+}
+
+/* Correctable Errors. */
+#define PSYCHO_CE_AFSR	0x0040UL
+#define  PSYCHO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
+#define  PSYCHO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
+#define  PSYCHO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
+#define  PSYCHO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
+#define  PSYCHO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
+#define  PSYCHO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
+#define  PSYCHO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved                     */
+#define  PSYCHO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits                */
+#define  PSYCHO_CEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define  PSYCHO_CEAFSR_DOFF	0x00000000e0000000UL /* Double Offset                */
+#define  PSYCHO_CEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
+#define  PSYCHO_CEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
+#define  PSYCHO_CEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
+/* NOTE(review): AFAR follows AFSR by 8 bytes for every other pair in
+ * this file (UE at 0x0030/0x0038, PCI at 0x2010/0x2018); this was
+ * originally defined as 0x0040UL, i.e. the AFSR itself -- the 0x0048
+ * below should be confirmed against the U2P programmer's manual.
+ */
+#define PSYCHO_CE_AFAR	0x0048UL
+
+/* Interrupt handler for correctable ECC errors.  Latches the CE
+ * AFAR/AFSR pair, clears the primary/secondary cause bits and logs
+ * the syndrome and transfer details.  Returns IRQ_NONE when no
+ * recognized cause bit was set.
+ * NOTE(review): verify that PSYCHO_CE_AFAR addresses the AFAR and
+ * not the AFSR register (cf. the UE pair at 0x0030/0x0038).
+ */
+static irqreturn_t psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch error status. */
+	afar = psycho_read(afar_reg);
+	afsr = psycho_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
+		 PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
+	if (!error_bits)
+		return IRQ_NONE;
+	psycho_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PSYCHO%d: Correctable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & PSYCHO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & PSYCHO_CEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & PSYCHO_CEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
+	       "UPA_MID[%02lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
+	       (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
+	       (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
+	       (afsr & PSYCHO_CEAFSR_MID) >> 24UL,
+	       ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
+	printk("PSYCHO%d: CE AFAR [%016lx]\n", p->index, afar);
+	printk("PSYCHO%d: CE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & PSYCHO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & PSYCHO_CEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & PSYCHO_CEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+/* PCI Errors.  They are signalled by the PCI bus module since they
+ * are associated with a specific bus segment.
+ */
+#define PSYCHO_PCI_AFSR_A	0x2010UL
+#define PSYCHO_PCI_AFSR_B	0x4010UL
+#define  PSYCHO_PCIAFSR_PMA	0x8000000000000000UL /* Primary Master Abort Error   */
+#define  PSYCHO_PCIAFSR_PTA	0x4000000000000000UL /* Primary Target Abort Error   */
+#define  PSYCHO_PCIAFSR_PRTRY	0x2000000000000000UL /* Primary Excessive Retries    */
+#define  PSYCHO_PCIAFSR_PPERR	0x1000000000000000UL /* Primary Parity Error         */
+#define  PSYCHO_PCIAFSR_SMA	0x0800000000000000UL /* Secondary Master Abort Error */
+#define  PSYCHO_PCIAFSR_STA	0x0400000000000000UL /* Secondary Target Abort Error */
+#define  PSYCHO_PCIAFSR_SRTRY	0x0200000000000000UL /* Secondary Excessive Retries  */
+#define  PSYCHO_PCIAFSR_SPERR	0x0100000000000000UL /* Secondary Parity Error       */
+#define  PSYCHO_PCIAFSR_RESV1	0x00ff000000000000UL /* Reserved                     */
+#define  PSYCHO_PCIAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define  PSYCHO_PCIAFSR_BLK	0x0000000080000000UL /* Trans was block operation    */
+#define  PSYCHO_PCIAFSR_RESV2	0x0000000040000000UL /* Reserved                     */
+#define  PSYCHO_PCIAFSR_MID	0x000000003e000000UL /* MID causing the error        */
+#define  PSYCHO_PCIAFSR_RESV3	0x0000000001ffffffUL /* Reserved                     */
+#define PSYCHO_PCI_AFAR_A	0x2018UL
+#define PSYCHO_PCI_AFAR_B	0x4018UL
+
+/* Secondary PCI-error path: no AFSR cause bit was set, so look at
+ * the PCI control register (streaming byte hole / SERR) and at the
+ * bridge device's PCI_STATUS for the cause.  Returns IRQ_HANDLED if
+ * any error condition was found and cleared, IRQ_NONE otherwise.
+ */
+static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm_a)
+{
+	unsigned long csr_reg, csr, csr_error_bits;
+	irqreturn_t ret = IRQ_NONE;
+	u16 stat;
+
+	if (is_pbm_a) {
+		csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
+	} else {
+		csr_reg = pbm->controller_regs + PSYCHO_PCIB_CTRL;
+	}
+	csr = psycho_read(csr_reg);
+	csr_error_bits =
+		csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
+	if (csr_error_bits) {
+		/* Clear the errors.  */
+		psycho_write(csr_reg, csr);
+
+		/* Log 'em.  */
+		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
+			printk("%s: PCI streaming byte hole error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
+			printk("%s: PCI SERR signal asserted.\n", pbm->name);
+		ret = IRQ_HANDLED;
+	}
+	/* NOTE(review): pbm->pci_bus->self is assumed non-NULL here --
+	 * confirm this holds for the root bus of each PBM.
+	 */
+	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+	if (stat & (PCI_STATUS_PARITY |
+		    PCI_STATUS_SIG_TARGET_ABORT |
+		    PCI_STATUS_REC_TARGET_ABORT |
+		    PCI_STATUS_REC_MASTER_ABORT |
+		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
+		       pbm->name, stat);
+		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
/* PCI error interrupt handler for one PBM.  Latches the AFSR/AFAR
 * error state, clears it, logs a decoded report, and scans the PBM's
 * bus for devices which logged the corresponding error type in their
 * config-space status registers.
 */
static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int is_pbm_a, reported;

	/* Both PBMs share one controller register block (see
	 * psycho_init), only the AFSR/AFAR offsets differ per PBM.
	 */
	is_pbm_a = (pbm == &pbm->parent->pbm_A);
	if (is_pbm_a) {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_A;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_A;
	} else {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_B;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_B;
	}

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	if (!error_bits)
		/* No AFSR error bits; look for other error sources. */
		return psycho_pcierr_intr_other(pbm, is_pbm_a);
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d(PBM%c): PCI Error, primary error type[%s]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("PSYCHO%d(PBM%c): bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk("PSYCHO%d(PBM%c): PCI AFAR [%016lx]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'), afar);
	printk("PSYCHO%d(PBM%c): PCI Secondary errors [",
	       p->index, (is_pbm_a ? 'A' : 'B'));
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(p, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
+
+/* XXX What about PowerFail/PowerManagement??? -DaveM */
+#define PSYCHO_ECC_CTRL		0x0020
+#define  PSYCHO_ECCCTRL_EE	 0x8000000000000000UL /* Enable ECC Checking */
+#define  PSYCHO_ECCCTRL_UE	 0x4000000000000000UL /* Enable UE Interrupts */
+#define  PSYCHO_ECCCTRL_CE	 0x2000000000000000UL /* Enable CE INterrupts */
+#define PSYCHO_UE_INO		0x2e
+#define PSYCHO_CE_INO		0x2f
+#define PSYCHO_PCIERR_A_INO	0x30
+#define PSYCHO_PCIERR_B_INO	0x31
+static void __init psycho_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
+	unsigned long base = p->pbm_A.controller_regs;
+	unsigned int irq, portid = pbm->portid;
+	u64 tmp;
+
+	/* Build IRQs and register handlers. */
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
+	if (request_irq(irq, psycho_ue_intr,
+			SA_SHIRQ, "PSYCHO UE", p) < 0) {
+		prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
+	if (request_irq(irq, psycho_ce_intr,
+			SA_SHIRQ, "PSYCHO CE", p) < 0) {
+		prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	pbm = &p->pbm_A;
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
+	if (request_irq(irq, psycho_pcierr_intr,
+			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
+		prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	pbm = &p->pbm_B;
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
+	if (request_irq(irq, psycho_pcierr_intr,
+			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
+		prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Enable UE and CE interrupts for controller. */
+	psycho_write(base + PSYCHO_ECC_CTRL,
+		     (PSYCHO_ECCCTRL_EE |
+		      PSYCHO_ECCCTRL_UE |
+		      PSYCHO_ECCCTRL_CE));
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits for each PBM.
+	 */
+	tmp = psycho_read(base + PSYCHO_PCIA_CTRL);
+	tmp |= (PSYCHO_PCICTRL_SERR |
+		PSYCHO_PCICTRL_SBH_ERR |
+		PSYCHO_PCICTRL_EEN);
+	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
+	psycho_write(base + PSYCHO_PCIA_CTRL, tmp);
+		     
+	tmp = psycho_read(base + PSYCHO_PCIB_CTRL);
+	tmp |= (PSYCHO_PCICTRL_SERR |
+		PSYCHO_PCICTRL_SBH_ERR |
+		PSYCHO_PCICTRL_EEN);
+	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
+	psycho_write(base + PSYCHO_PCIB_CTRL, tmp);
+}
+
+/* PSYCHO boot time probing and initialization. */
+static void __init psycho_resource_adjust(struct pci_dev *pdev,
+					  struct resource *res,
+					  struct resource *root)
+{
+	res->start += root->start;
+	res->end += root->start;
+}
+
/* Write a device's assigned resource back into the corresponding
 * PCI base address register (or ROM register), preserving the
 * register's read-only low bits via the size mask.
 */
static void __init psycho_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		/* Standard BARs are 4 bytes apart starting at 0x10. */
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* Resources are inclusive, so end - start is (size - 1),
	 * i.e. a mask of the address bits the BAR hardwires to zero.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	/* Keep the register's low (type/size) bits, replace the
	 * address bits with the PBM-relative address.
	 */
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
+
+static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
+{
+	u8 *addr;
+
+	/* Set cache-line size to 64 bytes, this is actually
+	 * a nop but I do it for completeness.
+	 */
+	addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_CACHE_LINE_SIZE);
+	pci_config_write8(addr, 64 / sizeof(u32));
+
+	/* Set PBM latency timer to 64 PCI clocks. */
+	addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_LATENCY_TIMER);
+	pci_config_write8(addr, 64);
+}
+
+static void __init pbm_scan_bus(struct pci_controller_info *p,
+				struct pci_pbm_info *pbm)
+{
+	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+
+	if (!cookie) {
+		prom_printf("PSYCHO: Critical allocation failure.\n");
+		prom_halt();
+	}
+
+	/* All we care about is the PBM. */
+	memset(cookie, 0, sizeof(*cookie));
+	cookie->pbm = pbm;
+
+	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
+				    p->pci_ops,
+				    pbm);
+	pci_fixup_host_bridge_self(pbm->pci_bus);
+	pbm->pci_bus->self->sysdata = cookie;
+
+	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
+	pci_record_assignments(pbm, pbm->pci_bus);
+	pci_assign_unassigned(pbm, pbm->pci_bus);
+	pci_fixup_irq(pbm, pbm->pci_bus);
+	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
+	pci_setup_busmastering(pbm, pbm->pci_bus);
+}
+
/* Controller scan entry point: configure and scan both PBMs,
 * then hook up the error interrupt handlers.
 */
static void __init psycho_scan_bus(struct pci_controller_info *p)
{
	pbm_config_busmastering(&p->pbm_B);
	p->pbm_B.is_66mhz_capable = 0;
	pbm_config_busmastering(&p->pbm_A);
	p->pbm_A.is_66mhz_capable = 1;
	pbm_scan_bus(p, &p->pbm_B);
	pbm_scan_bus(p, &p->pbm_A);

	/* After the PCI bus scan is complete, we can register
	 * the error interrupt handlers.
	 */
	psycho_register_error_handlers(p);
}
+
/* Initialize the single IOMMU shared by both PBMs: set up the
 * software state, invalidate the hardware TLB, allocate the TSB
 * (translation table) and dummy page, and enable translation.
 * Allocation failures are fatal at boot (prom_halt).
 */
static void __init psycho_iommu_init(struct pci_controller_info *p)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long tsbbase, i;
	u64 control;

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->iommu_cur_ctx = 0;

	/* Register addresses. */
	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
	iommu->iommu_flush    = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH;
	/* PSYCHO's IOMMU lacks ctx flushing. */
	iommu->iommu_ctxflush = 0;

	/* We use the main control register of PSYCHO as the write
	 * completion register.
	 */
	iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL;

	/*
	 * Invalidate TLB Entries.  Diagnostic mode must be enabled
	 * to write the tag/data entries directly.
	 */
	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control |= PSYCHO_IOMMU_CTRL_DENAB;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
	for(i = 0; i < 16; i++) {
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
	}

	/* Leave diag mode enabled for full-flushing done
	 * in pci_iommu.c
	 */

	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
	if (!tsbbase) {
		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;
	iommu->page_table_sz_bits = 17;		/* 128K entries */
	iommu->page_table_map_base = 0xc0000000;
	iommu->dma_addr_mask = 0xffffffff;	/* 32-bit DMA only */
	pci_iommu_table_init(iommu, IO_TSB_SIZE);

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map =
		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);

	for (i = 0; i < PBM_NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Point the hardware at the TSB and enable translation. */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));

	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
	control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);

	/* If necessary, hook us up for starfire IRQ translations. */
	if(this_is_starfire)
		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
	else
		p->starfire_cookie = NULL;
}
+
+#define PSYCHO_IRQ_RETRY	0x1a00UL
+#define PSYCHO_PCIA_DIAG	0x2020UL
+#define PSYCHO_PCIB_DIAG	0x4020UL
+#define  PSYCHO_PCIDIAG_RESV	 0xffffffffffffff80UL /* Reserved                     */
+#define  PSYCHO_PCIDIAG_DRETRY	 0x0000000000000040UL /* Disable retry limit          */
+#define  PSYCHO_PCIDIAG_DISYNC	 0x0000000000000020UL /* Disable DMA wr / irq sync    */
+#define  PSYCHO_PCIDIAG_DDWSYNC	 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
+#define  PSYCHO_PCIDIAG_IDDPAR	 0x0000000000000008UL /* Invert DMA data parity       */
+#define  PSYCHO_PCIDIAG_IPDPAR	 0x0000000000000004UL /* Invert PIO data parity       */
+#define  PSYCHO_PCIDIAG_IPAPAR	 0x0000000000000002UL /* Invert PIO address parity    */
+#define  PSYCHO_PCIDIAG_LPBACK	 0x0000000000000001UL /* Enable loopback mode         */
+
/* One-time controller hardware setup: bump the IRQ retry limit,
 * enable the PCI arbiter on both bus segments, and apply the
 * DMA-write/PIO-read synchronization erratum workaround.
 */
static void psycho_controller_hwinit(struct pci_controller_info *p)
{
	u64 tmp;

	/* PROM sets the IRQ retry value too low, increase it. */
	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);

	/* Enable arbiter for all PCI slots. */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL);
	tmp |= PSYCHO_PCICTRL_AEN;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp);

	/* Disable DMA write / PIO read synchronization on
	 * both PCI bus segments.
	 * [ U2P Erratum 1243770, STP2223BGA data sheet ]
	 */
	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp);

	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG);
	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
}
+
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+						   struct pci_pbm_info *pbm)
+{
+	char *name = pbm->name;
+
+	sprintf(name, "PSYCHO%d PBM%c",
+		p->index,
+		(pbm == &p->pbm_A ? 'A' : 'B'));
+	pbm->io_space.name = pbm->mem_space.name = name;
+
+	request_resource(&ioport_resource, &pbm->io_space);
+	request_resource(&iomem_resource, &pbm->mem_space);
+	pci_register_legacy_regions(&pbm->io_space,
+				    &pbm->mem_space);
+}
+
/* Initialize one PBM's streaming buffer: record its register
 * addresses, set up the 64-byte-aligned flush-flag word, and
 * enable the streaming buffer hardware.
 */
static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	unsigned long base = pbm->controller_regs;
	u64 control;

	if (is_pbm_a) {
		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_A;
		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_A;
		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_A;
	} else {
		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_B;
		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_B;
		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_B;
	}
	/* PSYCHO's streaming buffer lacks ctx flushing. */
	pbm->stc.strbuf_ctxflush      = 0;
	pbm->stc.strbuf_ctxmatch_base = 0;

	/* Round the flush-flag buffer up to the next 64-byte
	 * boundary as required by the hardware.
	 */
	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&pbm->stc.__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	pbm->stc.strbuf_flushflag_pa = (unsigned long)
		__pa(pbm->stc.strbuf_flushflag);

	/* Enable the streaming buffer.  We have to be careful
	 * just in case OBP left it with LRU locking enabled.
	 *
	 * It is possible to control if PBM will be rerun on
	 * line misses.  Currently I just retain whatever setting
	 * OBP left us with.  All checks so far show it having
	 * a value of zero.
	 */
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
	control = psycho_read(pbm->stc.strbuf_control);
	control |= PSYCHO_STRBUF_CTRL_ENAB;
	control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
	control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
	control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
	psycho_write(pbm->stc.strbuf_control, control);

	pbm->stc.strbuf_enabled = 1;
}
+
+#define PSYCHO_IOSPACE_A	0x002000000UL
+#define PSYCHO_IOSPACE_B	0x002010000UL
+#define PSYCHO_IOSPACE_SIZE	0x00000ffffUL
+#define PSYCHO_MEMSPACE_A	0x100000000UL
+#define PSYCHO_MEMSPACE_B	0x180000000UL
+#define PSYCHO_MEMSPACE_SIZE	0x07fffffffUL
+
/* Initialize one PBM: set up its I/O and MEM resource windows,
 * read its PROM properties (version, ranges, interrupt maps,
 * bus-range), and initialize its streaming buffer.  A missing
 * mandatory property is fatal at boot (prom_halt).
 */
static void psycho_pbm_init(struct pci_controller_info *p,
			    int prom_node, int is_pbm_a)
{
	unsigned int busrange[2];
	struct pci_pbm_info *pbm;
	int err;

	/* PBM A and B have distinct first-slot numbers and
	 * I/O / MEM window offsets within the controller space.
	 */
	if (is_pbm_a) {
		pbm = &p->pbm_A;
		pbm->pci_first_slot = 1;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
	} else {
		pbm = &p->pbm_B;
		pbm->pci_first_slot = 2;
		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
	}

	pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
	pbm->chip_version =
		prom_getintdefault(prom_node, "version#", 0);
	pbm->chip_revision =
		prom_getintdefault(prom_node, "module-revision#", 0);

	pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
	pbm->io_space.flags = IORESOURCE_IO;
	pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
	pbm->mem_space.flags = IORESOURCE_MEM;
	pbm_register_toplevel_resources(p, pbm);

	pbm->parent = p;
	pbm->prom_node = prom_node;
	prom_getstring(prom_node, "name",
		       pbm->prom_name,
		       sizeof(pbm->prom_name));

	/* "ranges" is optional; prom_getproperty returns the
	 * property length in bytes, or -1 if absent.
	 */
	err = prom_getproperty(prom_node, "ranges",
			       (char *)pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err != -1)
		pbm->num_pbm_ranges =
			(err / sizeof(struct linux_prom_pci_ranges));
	else
		pbm->num_pbm_ranges = 0;

	/* An "interrupt-map" without a matching mask is fatal. */
	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("PSYCHO-PBM: Fatal error, no "
				    "interrupt-map-mask.\n");
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO-PBM: Fatal error, no bus-range.\n");
		prom_halt();
	}
	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

	psycho_pbm_strbuf_init(p, pbm, is_pbm_a);
}
+
+#define PSYCHO_CONFIGSPACE	0x001000000UL
+
/* Probe-time entry point for a PSYCHO PCI host controller node.
 * Each PSYCHO has two PBMs which appear as separate PROM nodes
 * sharing one UPA portid: the first call for a given portid
 * allocates and initializes the controller, the second call just
 * initializes the remaining PBM.  Fatal errors prom_halt().
 */
void __init psycho_init(int node, char *model_name)
{
	struct linux_prom64_registers pr_regs[3];
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	u32 upa_portid;
	int is_pbm_a, err;

	upa_portid = prom_getintdefault(node, "upa-portid", 0xff);

	/* If a controller with this portid already exists, this
	 * node is its other PBM; pick whichever PBM slot has not
	 * yet been assigned a prom_node.
	 */
	for(p = pci_controller_root; p; p = p->next) {
		if (p->pbm_A.portid == upa_portid) {
			is_pbm_a = (p->pbm_A.prom_node == 0);
			psycho_pbm_init(p, node, is_pbm_a);
			return;
		}
	}

	/* First PBM seen for this controller: allocate everything. */
	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));
	/* Both PBMs share a single IOMMU. */
	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("PSYCHO: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->pbm_A.portid = upa_portid;
	p->pbm_B.portid = upa_portid;
	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;
	p->scan_bus = psycho_scan_bus;
	p->irq_build = psycho_irq_build;
	p->base_address_update = psycho_base_address_update;
	p->resource_adjust = psycho_resource_adjust;
	p->pci_ops = &psycho_ops;

	err = prom_getproperty(node, "reg",
			       (char *)&pr_regs[0],
			       sizeof(pr_regs));
	if (err == 0 || err == -1) {
		prom_printf("PSYCHO: Fatal error, no reg property.\n");
		prom_halt();
	}

	/* The third "reg" entry is the shared controller register
	 * block; both PBMs use the same base.
	 */
	p->pbm_A.controller_regs = pr_regs[2].phys_addr;
	p->pbm_B.controller_regs = pr_regs[2].phys_addr;
	printk("PCI: Found PSYCHO, control regs at %016lx\n",
	       p->pbm_A.controller_regs);

	p->pbm_A.config_space = p->pbm_B.config_space =
		(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
	printk("PSYCHO: Shared PCI config space at %016lx\n",
	       p->pbm_A.config_space);

	/*
	 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
	 * we need to adjust our MEM space mask.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	psycho_controller_hwinit(p);

	psycho_iommu_init(p);

	/* Which PBM this node is can be read out of bits [14:13]
	 * of its first "reg" entry.
	 */
	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
	psycho_pbm_init(p, node, is_pbm_a);
}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
new file mode 100644
index 0000000..5525d1e
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -0,0 +1,1702 @@
+/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
+ * pci_sabre.c: Sabre specific PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek   (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/apb.h>
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/oplib.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All SABRE registers are 64-bits.  The following accessor
+ * routines are how they are accessed.  The REG parameter
+ * is a physical address.  Accesses go through the
+ * ASI_PHYS_BYPASS_EC_E address space identifier, i.e. physical
+ * addressing that bypasses the external cache.
+ */
+#define sabre_read(__reg) \
+({	u64 __ret; \
+	__asm__ __volatile__("ldxa [%1] %2, %0" \
+			     : "=r" (__ret) \
+			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory"); \
+	__ret; \
+})
+/* 64-bit store counterpart of sabre_read(); the "memory" clobber
+ * keeps the compiler from reordering it across other accesses.
+ */
+#define sabre_write(__reg, __val) \
+	__asm__ __volatile__("stxa %0, [%1] %2" \
+			     : /* no outputs */ \
+			     : "r" (__val), "r" (__reg), \
+			       "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory")
+
+/* SABRE PCI controller register offsets and definitions. */
+#define SABRE_UE_AFSR		0x0030UL
+#define  SABRE_UEAFSR_PDRD	 0x4000000000000000UL	/* Primary PCI DMA Read */
+#define  SABRE_UEAFSR_PDWR	 0x2000000000000000UL	/* Primary PCI DMA Write */
+#define  SABRE_UEAFSR_SDRD	 0x0800000000000000UL	/* Secondary PCI DMA Read */
+#define  SABRE_UEAFSR_SDWR	 0x0400000000000000UL	/* Secondary PCI DMA Write */
+#define  SABRE_UEAFSR_SDTE	 0x0200000000000000UL	/* Secondary DMA Translation Error */
+#define  SABRE_UEAFSR_PDTE	 0x0100000000000000UL	/* Primary DMA Translation Error */
+#define  SABRE_UEAFSR_BMSK	 0x0000ffff00000000UL	/* Bytemask */
+#define  SABRE_UEAFSR_OFF	 0x00000000e0000000UL	/* Offset (AFAR bits [5:3] */
+#define  SABRE_UEAFSR_BLK	 0x0000000000800000UL	/* Was block operation */
+#define SABRE_UECE_AFAR		0x0038UL
+#define SABRE_CE_AFSR		0x0040UL
+#define  SABRE_CEAFSR_PDRD	 0x4000000000000000UL	/* Primary PCI DMA Read */
+#define  SABRE_CEAFSR_PDWR	 0x2000000000000000UL	/* Primary PCI DMA Write */
+#define  SABRE_CEAFSR_SDRD	 0x0800000000000000UL	/* Secondary PCI DMA Read */
+#define  SABRE_CEAFSR_SDWR	 0x0400000000000000UL	/* Secondary PCI DMA Write */
+#define  SABRE_CEAFSR_ESYND	 0x00ff000000000000UL	/* ECC Syndrome */
+#define  SABRE_CEAFSR_BMSK	 0x0000ffff00000000UL	/* Bytemask */
+#define  SABRE_CEAFSR_OFF	 0x00000000e0000000UL	/* Offset */
+#define  SABRE_CEAFSR_BLK	 0x0000000000800000UL	/* Was block operation */
+#define SABRE_UECE_AFAR_ALIAS	0x0048UL	/* Aliases to 0x0038 */
+#define SABRE_IOMMU_CONTROL	0x0200UL
+#define  SABRE_IOMMUCTRL_ERRSTS	 0x0000000006000000UL	/* Error status bits */
+#define  SABRE_IOMMUCTRL_ERR	 0x0000000001000000UL	/* Error present in IOTLB */
+#define  SABRE_IOMMUCTRL_LCKEN	 0x0000000000800000UL	/* IOTLB lock enable */
+#define  SABRE_IOMMUCTRL_LCKPTR	 0x0000000000780000UL	/* IOTLB lock pointer */
+#define  SABRE_IOMMUCTRL_TSBSZ	 0x0000000000070000UL	/* TSB Size */
+#define  SABRE_IOMMU_TSBSZ_1K   0x0000000000000000
+#define  SABRE_IOMMU_TSBSZ_2K   0x0000000000010000
+#define  SABRE_IOMMU_TSBSZ_4K   0x0000000000020000
+#define  SABRE_IOMMU_TSBSZ_8K   0x0000000000030000
+#define  SABRE_IOMMU_TSBSZ_16K  0x0000000000040000
+#define  SABRE_IOMMU_TSBSZ_32K  0x0000000000050000
+#define  SABRE_IOMMU_TSBSZ_64K  0x0000000000060000
+#define  SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
+#define  SABRE_IOMMUCTRL_TBWSZ	 0x0000000000000004UL	/* TSB assumed page size */
+#define  SABRE_IOMMUCTRL_DENAB	 0x0000000000000002UL	/* Diagnostic Mode Enable */
+#define  SABRE_IOMMUCTRL_ENAB	 0x0000000000000001UL	/* IOMMU Enable */
+#define SABRE_IOMMU_TSBBASE	0x0208UL
+#define SABRE_IOMMU_FLUSH	0x0210UL
+#define SABRE_IMAP_A_SLOT0	0x0c00UL
+#define SABRE_IMAP_B_SLOT0	0x0c20UL
+#define SABRE_IMAP_SCSI		0x1000UL
+#define SABRE_IMAP_ETH		0x1008UL
+#define SABRE_IMAP_BPP		0x1010UL
+#define SABRE_IMAP_AU_REC	0x1018UL
+#define SABRE_IMAP_AU_PLAY	0x1020UL
+#define SABRE_IMAP_PFAIL	0x1028UL
+#define SABRE_IMAP_KMS		0x1030UL
+#define SABRE_IMAP_FLPY		0x1038UL
+#define SABRE_IMAP_SHW		0x1040UL
+#define SABRE_IMAP_KBD		0x1048UL
+#define SABRE_IMAP_MS		0x1050UL
+#define SABRE_IMAP_SER		0x1058UL
+#define SABRE_IMAP_UE		0x1070UL
+#define SABRE_IMAP_CE		0x1078UL
+#define SABRE_IMAP_PCIERR	0x1080UL
+#define SABRE_IMAP_GFX		0x1098UL
+#define SABRE_IMAP_EUPA		0x10a0UL
+#define SABRE_ICLR_A_SLOT0	0x1400UL
+#define SABRE_ICLR_B_SLOT0	0x1480UL
+#define SABRE_ICLR_SCSI		0x1800UL
+#define SABRE_ICLR_ETH		0x1808UL
+#define SABRE_ICLR_BPP		0x1810UL
+#define SABRE_ICLR_AU_REC	0x1818UL
+#define SABRE_ICLR_AU_PLAY	0x1820UL
+#define SABRE_ICLR_PFAIL	0x1828UL
+#define SABRE_ICLR_KMS		0x1830UL
+#define SABRE_ICLR_FLPY		0x1838UL
+#define SABRE_ICLR_SHW		0x1840UL
+#define SABRE_ICLR_KBD		0x1848UL
+#define SABRE_ICLR_MS		0x1850UL
+#define SABRE_ICLR_SER		0x1858UL
+#define SABRE_ICLR_UE		0x1870UL
+#define SABRE_ICLR_CE		0x1878UL
+#define SABRE_ICLR_PCIERR	0x1880UL
+#define SABRE_WRSYNC		0x1c20UL
+#define SABRE_PCICTRL		0x2000UL
+#define  SABRE_PCICTRL_MRLEN	 0x0000001000000000UL	/* Use MemoryReadLine for block loads/stores */
+#define  SABRE_PCICTRL_SERR	 0x0000000400000000UL	/* Set when SERR asserted on PCI bus */
+#define  SABRE_PCICTRL_ARBPARK	 0x0000000000200000UL	/* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
+#define  SABRE_PCICTRL_CPUPRIO	 0x0000000000100000UL	/* Ultra-IIi granted every other bus cycle */
+#define  SABRE_PCICTRL_ARBPRIO	 0x00000000000f0000UL	/* Slot which is granted every other bus cycle */
+#define  SABRE_PCICTRL_ERREN	 0x0000000000000100UL	/* PCI Error Interrupt Enable */
+#define  SABRE_PCICTRL_RTRYWE	 0x0000000000000080UL	/* DMA Flow Control 0=wait-if-possible 1=retry */
+#define  SABRE_PCICTRL_AEN	 0x000000000000000fUL	/* Slot PCI arbitration enables */
+#define SABRE_PIOAFSR		0x2010UL
+#define  SABRE_PIOAFSR_PMA	 0x8000000000000000UL	/* Primary Master Abort */
+#define  SABRE_PIOAFSR_PTA	 0x4000000000000000UL	/* Primary Target Abort */
+#define  SABRE_PIOAFSR_PRTRY	 0x2000000000000000UL	/* Primary Excessive Retries */
+#define  SABRE_PIOAFSR_PPERR	 0x1000000000000000UL	/* Primary Parity Error */
+#define  SABRE_PIOAFSR_SMA	 0x0800000000000000UL	/* Secondary Master Abort */
+#define  SABRE_PIOAFSR_STA	 0x0400000000000000UL	/* Secondary Target Abort */
+#define  SABRE_PIOAFSR_SRTRY	 0x0200000000000000UL	/* Secondary Excessive Retries */
+#define  SABRE_PIOAFSR_SPERR	 0x0100000000000000UL	/* Secondary Parity Error */
+#define  SABRE_PIOAFSR_BMSK	 0x0000ffff00000000UL	/* Byte Mask */
+#define  SABRE_PIOAFSR_BLK	 0x0000000080000000UL	/* Was Block Operation */
+#define SABRE_PIOAFAR		0x2018UL
+#define SABRE_PCIDIAG		0x2020UL
+#define  SABRE_PCIDIAG_DRTRY	 0x0000000000000040UL	/* Disable PIO Retry Limit */
+#define  SABRE_PCIDIAG_IPAPAR	 0x0000000000000008UL	/* Invert PIO Address Parity */
+#define  SABRE_PCIDIAG_IPDPAR	 0x0000000000000004UL	/* Invert PIO Data Parity */
+#define  SABRE_PCIDIAG_IDDPAR	 0x0000000000000002UL	/* Invert DMA Data Parity */
+#define  SABRE_PCIDIAG_ELPBK	 0x0000000000000001UL	/* Loopback Enable - not supported */
+#define SABRE_PCITASR		0x2028UL
+#define  SABRE_PCITASR_EF	 0x0000000000000080UL	/* Respond to 0xe0000000-0xffffffff */
+#define  SABRE_PCITASR_CD	 0x0000000000000040UL	/* Respond to 0xc0000000-0xdfffffff */
+#define  SABRE_PCITASR_AB	 0x0000000000000020UL	/* Respond to 0xa0000000-0xbfffffff */
+#define  SABRE_PCITASR_89	 0x0000000000000010UL	/* Respond to 0x80000000-0x9fffffff */
+#define  SABRE_PCITASR_67	 0x0000000000000008UL	/* Respond to 0x60000000-0x7fffffff */
+#define  SABRE_PCITASR_45	 0x0000000000000004UL	/* Respond to 0x40000000-0x5fffffff */
+#define  SABRE_PCITASR_23	 0x0000000000000002UL	/* Respond to 0x20000000-0x3fffffff */
+#define  SABRE_PCITASR_01	 0x0000000000000001UL	/* Respond to 0x00000000-0x1fffffff */
+#define SABRE_PIOBUF_DIAG	0x5000UL
+#define SABRE_DMABUF_DIAGLO	0x5100UL
+#define SABRE_DMABUF_DIAGHI	0x51c0UL
+#define SABRE_IMAP_GFX_ALIAS	0x6000UL	/* Aliases to 0x1098 */
+#define SABRE_IMAP_EUPA_ALIAS	0x8000UL	/* Aliases to 0x10a0 */
+#define SABRE_IOMMU_VADIAG	0xa400UL
+#define SABRE_IOMMU_TCDIAG	0xa408UL
+#define SABRE_IOMMU_TAG		0xa580UL
+#define  SABRE_IOMMUTAG_ERRSTS	 0x0000000001800000UL	/* Error status bits */
+#define  SABRE_IOMMUTAG_ERR	 0x0000000000400000UL	/* Error present */
+#define  SABRE_IOMMUTAG_WRITE	 0x0000000000200000UL	/* Page is writable */
+#define  SABRE_IOMMUTAG_STREAM	 0x0000000000100000UL	/* Streamable bit - unused */
+#define  SABRE_IOMMUTAG_SIZE	 0x0000000000080000UL	/* 0=8k 1=16k */
+#define  SABRE_IOMMUTAG_VPN	 0x000000000007ffffUL	/* Virtual Page Number [31:13] */
+#define SABRE_IOMMU_DATA	0xa600UL
+#define SABRE_IOMMUDATA_VALID	 0x0000000040000000UL	/* Valid */
+#define SABRE_IOMMUDATA_USED	 0x0000000020000000UL	/* Used (for LRU algorithm) */
+#define SABRE_IOMMUDATA_CACHE	 0x0000000010000000UL	/* Cacheable */
+#define SABRE_IOMMUDATA_PPN	 0x00000000001fffffUL	/* Physical Page Number [33:13] */
+#define SABRE_PCI_IRQSTATE	0xa800UL
+#define SABRE_OBIO_IRQSTATE	0xa808UL
+#define SABRE_FFBCFG		0xf000UL
+#define  SABRE_FFBCFG_SPRQS	 0x000000000f000000	/* Slave P_RQST queue size */
+#define  SABRE_FFBCFG_ONEREAD	 0x0000000000004000	/* Slave supports one outstanding read */
+#define SABRE_MCCTRL0		0xf010UL
+#define  SABRE_MCCTRL0_RENAB	 0x0000000080000000	/* Refresh Enable */
+#define  SABRE_MCCTRL0_EENAB	 0x0000000010000000	/* Enable all ECC functions */
+#define  SABRE_MCCTRL0_11BIT	 0x0000000000001000	/* Enable 11-bit column addressing */
+#define  SABRE_MCCTRL0_DPP	 0x0000000000000f00	/* DIMM Pair Present Bits */
+#define  SABRE_MCCTRL0_RINTVL	 0x00000000000000ff	/* Refresh Interval */
+#define SABRE_MCCTRL1		0xf018UL
+#define  SABRE_MCCTRL1_AMDC	 0x0000000038000000	/* Advance Memdata Clock */
+#define  SABRE_MCCTRL1_ARDC	 0x0000000007000000	/* Advance DRAM Read Data Clock */
+#define  SABRE_MCCTRL1_CSR	 0x0000000000e00000	/* CAS to RAS delay for CBR refresh */
+#define  SABRE_MCCTRL1_CASRW	 0x00000000001c0000	/* CAS length for read/write */
+#define  SABRE_MCCTRL1_RCD	 0x0000000000038000	/* RAS to CAS delay */
+#define  SABRE_MCCTRL1_CP	 0x0000000000007000	/* CAS Precharge */
+#define  SABRE_MCCTRL1_RP	 0x0000000000000e00	/* RAS Precharge */
+#define  SABRE_MCCTRL1_RAS	 0x00000000000001c0	/* Length of RAS for refresh */
+#define  SABRE_MCCTRL1_CASRW2	 0x0000000000000038	/* Must be same as CASRW */
+#define  SABRE_MCCTRL1_RSC	 0x0000000000000007	/* RAS after CAS hold time */
+#define SABRE_RESETCTRL		0xf020UL
+
+#define SABRE_CONFIGSPACE	0x001000000UL
+#define SABRE_IOSPACE		0x002000000UL
+#define SABRE_IOSPACE_SIZE	0x000ffffffUL
+#define SABRE_MEMSPACE		0x100000000UL
+#define SABRE_MEMSPACE_SIZE	0x07fffffffUL
+
+/* UltraSparc-IIi Programmer's Manual, page 325, PCI
+ * configuration space address format:
+ * 
+ *  32             24 23 16 15    11 10       8 7   2  1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define SABRE_CONFIG_BASE(PBM)	\
+	((PBM)->config_space | (1UL << 24))
+#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG)	\
+	(((unsigned long)(BUS)   << 16) |	\
+	 ((unsigned long)(DEVFN) << 8)  |	\
+	 ((unsigned long)(REG)))
+
+static int hummingbird_p;
+static struct pci_bus *sabre_root_bus;
+
+/* Build the physical address used to generate a PCI config cycle
+ * for (bus, devfn, where) behind PBM.  Returns NULL when no PBM
+ * is associated with the bus.
+ */
+static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm,
+				     unsigned char bus,
+				     unsigned int devfn,
+				     int where)
+{
+	unsigned long cfg_addr;
+
+	if (pbm == NULL)
+		return NULL;
+
+	cfg_addr = SABRE_CONFIG_BASE(pbm);
+	cfg_addr |= SABRE_CONFIG_ENCODE(bus, devfn, where);
+
+	return (void *) cfg_addr;
+}
+
+/* Report whether DEVFN is outside the device/function range that
+ * exists on bus zero.  Non-hummingbird SABREs only implement
+ * slot 0 (function 0) and slot 1 (functions 0-1); hummingbird
+ * parts have no such restriction.
+ */
+static int sabre_out_of_range(unsigned char devfn)
+{
+	unsigned int slot = PCI_SLOT(devfn);
+	unsigned int func = PCI_FUNC(devfn);
+
+	if (hummingbird_p)
+		return 0;
+
+	if (slot > 1)
+		return 1;
+	if (slot == 0 && func > 0)
+		return 1;
+	if (slot == 1 && func > 1)
+		return 1;
+
+	return 0;
+}
+
+/* Range check used by the low-level config accessors: rejects
+ * accesses when the PBM has no parent controller, or when DEVFN
+ * names a slot above 8 on the first bus of either PBM.
+ * Hummingbird parts are never out of range here.
+ */
+static int __sabre_out_of_range(struct pci_pbm_info *pbm,
+				unsigned char bus,
+				unsigned char devfn)
+{
+	if (hummingbird_p)
+		return 0;
+
+	if (pbm->parent == 0)
+		return 1;
+
+	if (bus == pbm->pci_first_busno &&
+	    PCI_SLOT(devfn) > 8 &&
+	    (pbm == &pbm->parent->pbm_A ||
+	     pbm == &pbm->parent->pbm_B))
+		return 1;
+
+	return 0;
+}
+
+/* Low-level config space read.  *VALUE is pre-filled with all
+ * ones so that unmapped or out-of-range targets read back like
+ * an aborted config cycle.  Misaligned 16/32-bit accesses are
+ * logged and return PCIBIOS_SUCCESSFUL without touching hardware.
+ */
+static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				int where, int size, u32 *value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+	u16 tmp16;
+	u8 tmp8;
+
+	/* Default result: all ones, per aborted-cycle convention. */
+	switch (size) {
+	case 1:
+		*value = 0xff;
+		break;
+	case 2:
+		*value = 0xffff;
+		break;
+	case 4:
+		*value = 0xffffffff;
+		break;
+	}
+
+	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (__sabre_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		pci_config_read8((u8 *) addr, &tmp8);
+		*value = tmp8;
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_read_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read16((u16 *) addr, &tmp16);
+		*value = tmp16;
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_read_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read32(addr, value);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops read entry point.  Accesses to the controller itself
+ * (bus 0, slot 0, function 0) must hit each register at its
+ * natural size, so this routine widens or splits such accesses
+ * before handing the actual cycles to __sabre_read_pci_cfg().
+ */
+static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
+			      int where, int size, u32 *value)
+{
+	/* Nonexistent devices read back as all ones. */
+	if (!bus->number && sabre_out_of_range(devfn)) {
+		switch (size) {
+		case 1:
+			*value = 0xff;
+			break;
+		case 2:
+			*value = 0xffff;
+			break;
+		case 4:
+			*value = 0xffffffff;
+			break;
+		}
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	if (bus->number || PCI_SLOT(devfn))
+		return __sabre_read_pci_cfg(bus, devfn, where, size, value);
+
+	/* When accessing PCI config space of the PCI controller itself (bus
+	 * 0, device slot 0, function 0) there are restrictions.  Each
+	 * register must be accessed as it's natural size.  Thus, for example
+	 * the Vendor ID must be accessed as a 16-bit quantity.
+	 */
+
+	switch (size) {
+	case 1:
+		if (where < 8) {
+			/* Low registers are 16-bit wide: read the
+			 * containing word and pick out the byte.
+			 */
+			u32 tmp32;
+			u16 tmp16;
+
+			__sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
+			tmp16 = (u16) tmp32;
+			if (where & 1)
+				*value = tmp16 >> 8;
+			else
+				*value = tmp16 & 0xff;
+		} else
+			return __sabre_read_pci_cfg(bus, devfn, where, 1, value);
+		break;
+
+	case 2:
+		if (where < 8)
+			return __sabre_read_pci_cfg(bus, devfn, where, 2, value);
+		else {
+			/* Above offset 8, compose the word from two
+			 * byte reads (little-endian layout).
+			 */
+			u32 tmp32;
+			u8 tmp8;
+
+			__sabre_read_pci_cfg(bus, devfn, where, 1, &tmp32);
+			tmp8 = (u8) tmp32;
+			*value = tmp8;
+			__sabre_read_pci_cfg(bus, devfn, where + 1, 1, &tmp32);
+			tmp8 = (u8) tmp32;
+			*value |= tmp8 << 8;
+		}
+		break;
+
+	case 4: {
+		/* 32-bit reads always split into two 16-bit reads,
+		 * recursing so the natural-size rules above apply.
+		 */
+		u32 tmp32;
+		u16 tmp16;
+
+		sabre_read_pci_cfg(bus, devfn, where, 2, &tmp32);
+		tmp16 = (u16) tmp32;
+		*value = tmp16;
+		sabre_read_pci_cfg(bus, devfn, where + 2, 2, &tmp32);
+		tmp16 = (u16) tmp32;
+		*value |= tmp16 << 16;
+		break;
+	}
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Low-level config space write.  Silently succeeds for unmapped
+ * or out-of-range targets; misaligned 16/32-bit accesses are
+ * logged and dropped, returning PCIBIOS_SUCCESSFUL either way.
+ */
+static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				 int where, int size, u32 value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+
+	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (__sabre_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		pci_config_write8((u8 *) addr, value);
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_write_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write16((u16 *) addr, value);
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_write_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write32(addr, value);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops write entry point.  As with reads, registers of the
+ * controller itself (bus 0, slot 0) must be accessed at their
+ * natural size; sub-word writes to the low (16-bit) registers
+ * are therefore done as read-modify-write of the containing word.
+ */
+static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
+			       int where, int size, u32 value)
+{
+	if (bus->number)
+		return __sabre_write_pci_cfg(bus, devfn, where, size, value);
+
+	if (sabre_out_of_range(devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		if (where < 8) {
+			u32 tmp32;
+			u16 tmp16;
+
+			__sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
+			tmp16 = (u16) tmp32;
+			/* Merge the new byte into the old 16-bit word.
+			 * The previous code computed the merge into
+			 * 'value' but then wrote back the unmodified
+			 * old word, so these byte writes were no-ops.
+			 */
+			if (where & 1) {
+				tmp16 &= 0x00ff;
+				tmp16 |= (value & 0xff) << 8;
+			} else {
+				tmp16 &= 0xff00;
+				tmp16 |= (value & 0xff);
+			}
+			tmp32 = (u32) tmp16;
+			return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32);
+		} else
+			return __sabre_write_pci_cfg(bus, devfn, where, 1, value);
+		break;
+	case 2:
+		if (where < 8)
+			return __sabre_write_pci_cfg(bus, devfn, where, 2, value);
+		else {
+			/* High registers are byte-wide: split the word. */
+			__sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff);
+			__sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8);
+		}
+		break;
+	case 4:
+		/* 32-bit writes always split into two 16-bit writes,
+		 * recursing so the natural-size rules apply.
+		 */
+		sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff);
+		sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors handed to the generic PCI layer. */
+static struct pci_ops sabre_ops = {
+	.read =		sabre_read_pci_cfg,
+	.write =	sabre_write_pci_cfg,
+};
+
+/* IMAP register offset for a PCI slot interrupt.  INO bit 4
+ * selects PCI bus A versus B; bits 3:2 select the slot, with one
+ * 8-byte register per slot.
+ */
+static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
+{
+	unsigned long base;
+	unsigned long slot = (ino >> 2) & 0x3;
+
+	base = (ino & 0x10) ? SABRE_IMAP_B_SLOT0 : SABRE_IMAP_A_SLOT0;
+
+	return base + slot * 8;
+}
+
+/* IMAP register offsets for the fixed onboard interrupt sources,
+ * indexed by (INO - SABRE_ONBOARD_IRQ_BASE).  Zero entries are
+ * reserved INOs with no IMAP register.
+ */
+static unsigned long __onboard_imap_off[] = {
+/*0x20*/	SABRE_IMAP_SCSI,
+/*0x21*/	SABRE_IMAP_ETH,
+/*0x22*/	SABRE_IMAP_BPP,
+/*0x23*/	SABRE_IMAP_AU_REC,
+/*0x24*/	SABRE_IMAP_AU_PLAY,
+/*0x25*/	SABRE_IMAP_PFAIL,
+/*0x26*/	SABRE_IMAP_KMS,
+/*0x27*/	SABRE_IMAP_FLPY,
+/*0x28*/	SABRE_IMAP_SHW,
+/*0x29*/	SABRE_IMAP_KBD,
+/*0x2a*/	SABRE_IMAP_MS,
+/*0x2b*/	SABRE_IMAP_SER,
+/*0x2c*/	0 /* reserved */,
+/*0x2d*/	0 /* reserved */,
+/*0x2e*/	SABRE_IMAP_UE,
+/*0x2f*/	SABRE_IMAP_CE,
+/*0x30*/	SABRE_IMAP_PCIERR,
+};
+#define SABRE_ONBOARD_IRQ_BASE		0x20
+#define SABRE_ONBOARD_IRQ_LAST		0x30
+#define sabre_onboard_imap_offset(__ino) \
+	__onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
+
+/* ICLR register offset for an INO.  Bit 5 distinguishes the
+ * onboard bank (based at SABRE_ICLR_SCSI) from the PCI slot bank
+ * (based at SABRE_ICLR_A_SLOT0); each entry is 8 bytes.
+ */
+#define sabre_iclr_offset(ino)					      \
+	((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) :  \
+			(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
+
+/* PCI SABRE INO number to Sparc PIL level.  A zero entry means
+ * "no fixed level"; sabre_ino_to_pil() then derives a PIL from
+ * the PCI device's base class instead.
+ */
+static unsigned char sabre_pil_table[] = {
+/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
+/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
+/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
+/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
+/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
+/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
+/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
+/*0x20*/4,		/* SCSI				*/
+/*0x21*/5,		/* Ethernet			*/
+/*0x22*/8,		/* Parallel Port		*/
+/*0x23*/13,		/* Audio Record			*/
+/*0x24*/14,		/* Audio Playback		*/
+/*0x25*/15,		/* PowerFail			*/
+/*0x26*/4,		/* second SCSI			*/
+/*0x27*/11,		/* Floppy			*/
+/*0x28*/4,		/* Spare Hardware		*/
+/*0x29*/9,		/* Keyboard			*/
+/*0x2a*/4,		/* Mouse			*/
+/*0x2b*/12,		/* Serial			*/
+/*0x2c*/10,		/* Timer 0			*/
+/*0x2d*/11,		/* Timer 1			*/
+/*0x2e*/15,		/* Uncorrectable ECC		*/
+/*0x2f*/15,		/* Correctable ECC		*/
+/*0x30*/15,		/* PCI Bus A Error		*/
+/*0x31*/15,		/* PCI Bus B Error		*/
+/*0x32*/15,		/* Power Management		*/
+};
+
+/* Map a SABRE INO to a Sparc processor interrupt level (PIL).
+ * Zero entries in sabre_pil_table (the PCI slot interrupts) get
+ * a PIL derived from the device's PCI base class; the Sun RIO
+ * USB controller is special-cased to PIL 9.
+ */
+static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int ret;
+
+	if (pdev &&
+	    pdev->vendor == PCI_VENDOR_ID_SUN &&
+	    pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
+		return 9;
+
+	ret = sabre_pil_table[ino];
+	if (ret == 0 && pdev == NULL) {
+		/* No device to classify; use a conservative default. */
+		ret = 4;
+	} else if (ret == 0) {
+		switch ((pdev->class >> 16) & 0xff) {
+		case PCI_BASE_CLASS_STORAGE:
+			ret = 4;
+			break;
+
+		case PCI_BASE_CLASS_NETWORK:
+			ret = 6;
+			break;
+
+		case PCI_BASE_CLASS_DISPLAY:
+			ret = 9;
+			break;
+
+		case PCI_BASE_CLASS_MULTIMEDIA:
+		case PCI_BASE_CLASS_MEMORY:
+		case PCI_BASE_CLASS_BRIDGE:
+		case PCI_BASE_CLASS_SERIAL:
+			ret = 10;
+			break;
+
+		default:
+			ret = 4;
+			break;
+		}
+	}
+	return ret;
+}
+
+/* Build (or look up) the kernel IRQ for a SABRE interrupt source.
+ * INO is masked to the controller's INO field, mapped to IMAP and
+ * ICLR register addresses, then handed to the generic build_irq().
+ */
+static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
+					   struct pci_dev *pdev,
+					   unsigned int ino)
+{
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr;
+	unsigned long imap_off, iclr_off;
+	int pil, inofixup = 0;
+
+	ino &= PCI_IRQ_INO;
+	if (ino < SABRE_ONBOARD_IRQ_BASE) {
+		/* PCI slot */
+		imap_off = sabre_pcislot_imap_offset(ino);
+	} else {
+		/* onboard device */
+		if (ino > SABRE_ONBOARD_IRQ_LAST) {
+			prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
+			prom_halt();
+		}
+		imap_off = sabre_onboard_imap_offset(ino);
+	}
+
+	/* Now build the IRQ bucket. */
+	pil = sabre_ino_to_pil(pdev, ino);
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	/* NOTE(review): the +4 presumably addresses the low 32-bit
+	 * half of the 64-bit IMAP/ICLR registers — confirm against
+	 * the UltraSPARC-IIi documentation.
+	 */
+	imap = pbm->controller_regs + imap_off;
+	imap += 4;
+
+	iclr_off = sabre_iclr_offset(ino);
+	iclr = pbm->controller_regs + iclr_off;
+	iclr += 4;
+
+	/* Slot interrupts (bit 5 clear) fold the INT A-D line number
+	 * into the IRQ via inofixup.
+	 */
+	if ((ino & 0x20) == 0)
+		inofixup = ino & 0x03;
+
+	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	if (pdev) {
+		struct pcidev_cookie *pcp = pdev->sysdata;
+
+		/* When a device lives behind a bridge deeper in the
+		 * PCI bus topology than APB, a special sequence must
+		 * run to make sure all pending DMA transfers at the
+		 * time of IRQ delivery are visible in the coherency
+		 * domain by the cpu.  This sequence is to perform
+		 * a read on the far side of the non-APB bridge, then
+		 * perform a read of Sabre's DMA write-sync register.
+		 *
+		 * Currently, the PCI_CONFIG register for the device
+		 * is used for this read from the far side of the bridge.
+		 */
+		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
+			bucket->flags |= IBF_DMA_SYNC;
+			bucket->synctab_ent = dma_sync_reg_table_entry++;
+			dma_sync_reg_table[bucket->synctab_ent] =
+				(unsigned long) sabre_pci_config_mkaddr(
+					pcp->pbm,
+					pdev->bus->number, pdev->devfn, PCI_COMMAND);
+		}
+	}
+	return __irq(bucket);
+}
+
+/* SABRE error handling support.
+ *
+ * Interrogate the IOMMU after a UE/PCI error: if the control
+ * register reports an error, log its type, then enter diagnostic
+ * mode to dump and clear any IOTLB entries with their error bit
+ * set.  AFSR/AFAR are passed in by the caller but only the IOMMU
+ * state is inspected here.  Runs under the IOMMU lock.
+ */
+static void sabre_check_iommu_error(struct pci_controller_info *p,
+				    unsigned long afsr,
+				    unsigned long afar)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long iommu_tag[16];
+	unsigned long iommu_data[16];
+	unsigned long flags;
+	u64 control;
+	int i;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	control = sabre_read(iommu->iommu_control);
+	if (control & SABRE_IOMMUCTRL_ERR) {
+		char *type_string;
+
+		/* Clear the error encountered bit.
+		 * NOTE: On Sabre this is write 1 to clear,
+		 *       which is different from Psycho.
+		 */
+		sabre_write(iommu->iommu_control, control);
+		switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
+		case 1:
+			type_string = "Invalid Error";
+			break;
+		case 3:
+			type_string = "ECC Error";
+			break;
+		default:
+			type_string = "Unknown";
+			break;
+		};
+		printk("SABRE%d: IOMMU Error, type[%s]\n",
+		       p->index, type_string);
+
+		/* Enter diagnostic mode and probe for error'd
+		 * entries in the IOTLB.
+		 */
+		control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
+		sabre_write(iommu->iommu_control,
+			    (control | SABRE_IOMMUCTRL_DENAB));
+		/* Snapshot then clear all 16 IOTLB tag/data pairs. */
+		for (i = 0; i < 16; i++) {
+			unsigned long base = p->pbm_A.controller_regs;
+
+			iommu_tag[i] =
+				sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL));
+			iommu_data[i] =
+				sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL));
+			sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
+			sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
+		}
+		/* Leave diagnostic mode, restoring the control reg. */
+		sabre_write(iommu->iommu_control, control);
+
+		for (i = 0; i < 16; i++) {
+			unsigned long tag, data;
+
+			tag = iommu_tag[i];
+			if (!(tag & SABRE_IOMMUTAG_ERR))
+				continue;
+
+			data = iommu_data[i];
+			switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) {
+			case 1:
+				type_string = "Invalid Error";
+				break;
+			case 3:
+				type_string = "ECC Error";
+				break;
+			default:
+				type_string = "Unknown";
+				break;
+			};
+			printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
+			       p->index, i, tag, type_string,
+			       ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
+			       ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
+			       ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT));
+			printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
+			       p->index, i, data,
+			       ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
+			       ((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
+			       ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
+			       ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT));
+		}
+	}
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Uncorrectable-error interrupt handler.  Latches AFAR/AFSR,
+ * clears the primary/secondary error bits (write-1-to-clear),
+ * logs the details, and checks the IOMMU for related errors.
+ * Returns IRQ_NONE when no UE bits were set (shared IRQ line).
+ */
+static irqreturn_t sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_UE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch uncorrectable error status. */
+	afar = sabre_read(afar_reg);
+	afsr = sabre_read(afsr_reg);
+
+	/* Clear the primary/secondary error status bits. */
+	error_bits = afsr &
+		(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
+		 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
+		 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
+	if (!error_bits)
+		return IRQ_NONE;
+	sabre_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("SABRE%d: Uncorrectable Error, primary error type[%s%s]\n",
+	       p->index,
+	       ((error_bits & SABRE_UEAFSR_PDRD) ?
+		"DMA Read" :
+		((error_bits & SABRE_UEAFSR_PDWR) ?
+		 "DMA Write" : "???")),
+	       ((error_bits & SABRE_UEAFSR_PDTE) ?
+		":Translation Error" : ""));
+	printk("SABRE%d: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
+	       (afsr & SABRE_UEAFSR_OFF) >> 29UL,
+	       ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
+	printk("SABRE%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("SABRE%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SABRE_UEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & SABRE_UEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (afsr & SABRE_UEAFSR_SDTE) {
+		reported++;
+		printk("(Translation Error)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	sabre_check_iommu_error(p, afsr, afar);
+
+	return IRQ_HANDLED;
+}
+
+/* Correctable-error interrupt handler.  Same latch/clear/log
+ * pattern as sabre_ue_intr(), but no IOMMU interrogation since
+ * the error was corrected.  Returns IRQ_NONE when no CE bits
+ * were set (shared IRQ line).
+ */
+static irqreturn_t sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_CE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch error status. */
+	afar = sabre_read(afar_reg);
+	afsr = sabre_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
+		 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
+	if (!error_bits)
+		return IRQ_NONE;
+	sabre_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("SABRE%d: Correctable Error, primary error type[%s]\n",
+	       p->index,
+	       ((error_bits & SABRE_CEAFSR_PDRD) ?
+		"DMA Read" :
+		((error_bits & SABRE_CEAFSR_PDWR) ?
+		 "DMA Write" : "???")));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
+	       "was_block(%d)\n",
+	       p->index,
+	       (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
+	       (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
+	       (afsr & SABRE_CEAFSR_OFF) >> 29UL,
+	       ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
+	printk("SABRE%d: CE AFAR [%016lx]\n", p->index, afar);
+	printk("SABRE%d: CE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SABRE_CEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & SABRE_CEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+/* Fallback for a PCI-error interrupt with no PIO AFSR bits set:
+ * look for SERR in the PCI control register and for error bits
+ * in the root bridge's PCI_STATUS, logging and clearing whatever
+ * is found.  Returns IRQ_HANDLED if anything was reported.
+ */
+static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
+{
+	unsigned long csr_reg, csr, csr_error_bits;
+	irqreturn_t ret = IRQ_NONE;
+	u16 stat;
+
+	csr_reg = p->pbm_A.controller_regs + SABRE_PCICTRL;
+	csr = sabre_read(csr_reg);
+	csr_error_bits =
+		csr & SABRE_PCICTRL_SERR;
+	if (csr_error_bits) {
+		/* Clear the errors.  */
+		sabre_write(csr_reg, csr);
+
+		/* Log 'em.  */
+		if (csr_error_bits & SABRE_PCICTRL_SERR)
+			printk("SABRE%d: PCI SERR signal asserted.\n",
+			       p->index);
+		ret = IRQ_HANDLED;
+	}
+	/* Check the root bridge's own status register too. */
+	pci_read_config_word(sabre_root_bus->self,
+			     PCI_STATUS, &stat);
+	if (stat & (PCI_STATUS_PARITY |
+		    PCI_STATUS_SIG_TARGET_ABORT |
+		    PCI_STATUS_REC_TARGET_ABORT |
+		    PCI_STATUS_REC_MASTER_ABORT |
+		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+		printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
+		       p->index, stat);
+		pci_write_config_word(sabre_root_bus->self,
+				      PCI_STATUS, 0xffff);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+/* PCI-error interrupt handler.  Latches the PIO AFSR/AFAR pair,
+ * clears and logs the primary/secondary error bits, then scans
+ * both PCI buses for devices that logged matching error types.
+ * Falls through to sabre_pcierr_intr_other() when no AFSR bits
+ * are set.
+ */
+static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	afsr_reg = p->pbm_A.controller_regs + SABRE_PIOAFSR;
+	afar_reg = p->pbm_A.controller_regs + SABRE_PIOAFAR;
+
+	/* Latch error status. */
+	afar = sabre_read(afar_reg);
+	afsr = sabre_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_PTA |
+		 SABRE_PIOAFSR_PRTRY | SABRE_PIOAFSR_PPERR |
+		 SABRE_PIOAFSR_SMA | SABRE_PIOAFSR_STA |
+		 SABRE_PIOAFSR_SRTRY | SABRE_PIOAFSR_SPERR);
+	if (!error_bits)
+		return sabre_pcierr_intr_other(p);
+	sabre_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("SABRE%d: PCI Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SABRE_PIOAFSR_PMA) ?
+		 "Master Abort" :
+		 ((error_bits & SABRE_PIOAFSR_PTA) ?
+		  "Target Abort" :
+		  ((error_bits & SABRE_PIOAFSR_PRTRY) ?
+		   "Excessive Retries" :
+		   ((error_bits & SABRE_PIOAFSR_PPERR) ?
+		    "Parity Error" : "???"))))));
+	printk("SABRE%d: bytemask[%04lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & SABRE_PIOAFSR_BMSK) >> 32UL,
+	       (afsr & SABRE_PIOAFSR_BLK) ? 1 : 0);
+	printk("SABRE%d: PCI AFAR [%016lx]\n", p->index, afar);
+	printk("SABRE%d: PCI Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SABRE_PIOAFSR_SMA) {
+		reported++;
+		printk("(Master Abort)");
+	}
+	if (afsr & SABRE_PIOAFSR_STA) {
+		reported++;
+		printk("(Target Abort)");
+	}
+	if (afsr & SABRE_PIOAFSR_SRTRY) {
+		reported++;
+		printk("(Excessive Retries)");
+	}
+	if (afsr & SABRE_PIOAFSR_SPERR) {
+		reported++;
+		printk("(Parity Error)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* For the error types shown, scan both PCI buses for devices
+	 * which have logged that error type.
+	 */
+
+	/* If we see a Target Abort, this could be the result of an
+	 * IOMMU translation error of some sort.  It is extremely
+	 * useful to log this information as usually it indicates
+	 * a bug in the IOMMU support code or a PCI device driver.
+	 */
+	if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
+		sabre_check_iommu_error(p, afsr, afar);
+		pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
+		pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
+	}
+	if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
+		pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
+		pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
+	}
+	/* For excessive retries, SABRE/PBM will abort the device
+	 * and there is no way to specifically check for excessive
+	 * retries in the config space status registers.  So what
+	 * we hope is that we'll catch it via the master/target
+	 * abort events.
+	 */
+
+	if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
+		pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
+		pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* XXX What about PowerFail/PowerManagement??? -DaveM */
+#define SABRE_UE_INO		0x2e
+#define SABRE_CE_INO		0x2f
+#define SABRE_PCIERR_INO	0x30
+/* Register the UE (uncorrectable ECC), CE (correctable ECC) and
+ * PCI error interrupt handlers for this SABRE and then enable
+ * error reporting in the PCI control register.  Registration
+ * failure halts into the PROM: the system cannot run safely
+ * without these handlers.
+ */
+static void __init sabre_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
+	unsigned long base = pbm->controller_regs;
+	unsigned long irq, portid = pbm->portid;
+	u64 tmp;
+
+	/* We clear the error bits in the appropriate AFSR before
+	 * registering the handler so that we don't get spurious
+	 * interrupts.
+	 */
+	sabre_write(base + SABRE_UE_AFSR,
+		    (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
+		     SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
+		     SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
+	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
+	if (request_irq(irq, sabre_ue_intr,
+			SA_SHIRQ, "SABRE UE", p) < 0) {
+		prom_printf("SABRE%d: Cannot register UE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Same clear-then-register sequence for correctable ECC. */
+	sabre_write(base + SABRE_CE_AFSR,
+		    (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
+		     SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
+	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
+	if (request_irq(irq, sabre_ce_intr,
+			SA_SHIRQ, "SABRE CE", p) < 0) {
+		prom_printf("SABRE%d: Cannot register CE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
+	if (request_irq(irq, sabre_pcierr_intr,
+			SA_SHIRQ, "SABRE PCIERR", p) < 0) {
+		prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Only now that handlers are in place, turn on error
+	 * reporting interrupts in the PCI control register.
+	 */
+	tmp = sabre_read(base + SABRE_PCICTRL);
+	tmp |= SABRE_PCICTRL_ERREN;
+	sabre_write(base + SABRE_PCICTRL, tmp);
+}
+
+/* Rebase a child device's resource into the SABRE's physical
+ * address space by adding the controller-relative base of the
+ * matching region (IO vs MEM).
+ */
+static void __init sabre_resource_adjust(struct pci_dev *pdev,
+					 struct resource *res,
+					 struct resource *root)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	unsigned long offset;
+
+	offset = pbm->controller_regs;
+	offset += (res->flags & IORESOURCE_IO) ?
+		SABRE_IOSPACE : SABRE_MEMSPACE;
+
+	res->start += offset;
+	res->end += offset;
+}
+
+/* Write an updated base address for one of PDEV's resources back
+ * into its configuration-space BAR (or expansion ROM register),
+ * converting from SABRE physical space to bus-relative form.
+ */
+static void __init sabre_base_address_update(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res;
+	unsigned long base;
+	u32 reg;
+	int where, size, is_64bit;
+
+	res = &pdev->resource[resource];
+	if (resource < 6) {
+		where = PCI_BASE_ADDRESS_0 + (resource * 4);
+	} else if (resource == PCI_ROM_RESOURCE) {
+		where = pdev->rom_base_reg;
+	} else {
+		/* Somebody might have asked allocation of a non-standard resource */
+		return;
+	}
+
+	is_64bit = 0;
+	if (res->flags & IORESOURCE_IO)
+		base = pbm->controller_regs + SABRE_IOSPACE;
+	else {
+		base = pbm->controller_regs + SABRE_MEMSPACE;
+		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			is_64bit = 1;
+	}
+
+	/* NOTE(review): resource 'end' appears to be inclusive here,
+	 * so 'size' is really (size - 1), i.e. the low-bit mask of
+	 * the BAR.  The expression below keeps the BAR's read-only
+	 * low bits and substitutes the new bus-relative address.
+	 */
+	size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+	reg = ((reg & size) |
+	       (((u32)(res->start - base)) & ~size));
+	if (resource == PCI_ROM_RESOURCE) {
+		reg |= PCI_ROM_ADDRESS_ENABLE;
+		res->flags |= IORESOURCE_ROM_ENABLE;
+	}
+	pci_write_config_dword(pdev, where, reg);
+
+	/* This knows that the upper 32-bits of the address
+	 * must be zero.  Our PCI common layer enforces this.
+	 */
+	if (is_64bit)
+		pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* Program every APB (Simba) bridge found on the SABRE's bus:
+ * enable decoding/bus-mastering and error reporting, clear any
+ * sticky status bits, and set sane latency timers.
+ */
+static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
+{
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
+		u32 cfg32;
+		u16 cfg16;
+
+		/* Only APB (Simba) bridges need this treatment. */
+		if (pdev->vendor != PCI_VENDOR_ID_SUN ||
+		    pdev->device != PCI_DEVICE_ID_SUN_SIMBA)
+			continue;
+
+		/* Turn on IO/MEM decoding, bus mastering, and
+		 * parity/SERR error signalling.
+		 */
+		sabre_read_pci_cfg(pdev->bus, pdev->devfn,
+				   PCI_COMMAND, 2, &cfg32);
+		cfg16 = (u16) cfg32;
+		cfg16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+			PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
+			PCI_COMMAND_IO;
+		cfg32 = (u32) cfg16;
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_COMMAND, 2, cfg32);
+
+		/* Status register bits are "write 1 to clear". */
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_STATUS, 2, 0xffff);
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_SEC_STATUS, 2, 0xffff);
+
+		/* Use a primary/secondary latency timer value of 64. */
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_LATENCY_TIMER, 1, 64);
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_SEC_LATENCY_TIMER, 1, 64);
+
+		/* Enable reporting/forwarding of master aborts,
+		 * parity, and SERR.
+		 */
+		sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+				    PCI_BRIDGE_CONTROL, 1,
+				    (PCI_BRIDGE_CTL_PARITY |
+				     PCI_BRIDGE_CTL_SERR |
+				     PCI_BRIDGE_CTL_MASTER_ABORT));
+	}
+}
+
+/* Allocate a zeroed pcidev_cookie bound to PBM.  Allocation
+ * failure this early is unrecoverable, so we halt into the PROM.
+ */
+static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
+{
+	struct pcidev_cookie *cookie;
+
+	cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie == NULL) {
+		prom_printf("SABRE: Critical allocation failure.\n");
+		prom_halt();
+	}
+
+	memset(cookie, 0, sizeof(*cookie));
+
+	/* The PBM pointer is all the cookie needs to carry. */
+	cookie->pbm = pbm;
+
+	return cookie;
+}
+
+/* Probe the PCI bus(es) below the SABRE, attach cookies and PBM
+ * state to each discovered bus, then finally enable the
+ * controller's error interrupts.  Only one SABRE is supported.
+ */
+static void __init sabre_scan_bus(struct pci_controller_info *p)
+{
+	static int once;
+	struct pci_bus *sabre_bus, *pbus;
+	struct pci_pbm_info *pbm;
+	struct pcidev_cookie *cookie;
+	int sabres_scanned;
+
+	/* The APB bridge speaks to the Sabre host PCI bridge
+	 * at 66Mhz, but the front side of APB runs at 33Mhz
+	 * for both segments.
+	 */
+	p->pbm_A.is_66mhz_capable = 0;
+	p->pbm_B.is_66mhz_capable = 0;
+
+	/* This driver has not been verified to handle
+	 * multiple SABREs yet, so trap this.
+	 *
+	 * Also note that the SABRE host bridge is hardwired
+	 * to live at bus 0.
+	 */
+	if (once != 0) {
+		prom_printf("SABRE: Multiple controllers unsupported.\n");
+		prom_halt();
+	}
+	once++;
+
+	cookie = alloc_bridge_cookie(&p->pbm_A);
+
+	sabre_bus = pci_scan_bus(p->pci_first_busno,
+				 p->pci_ops,
+				 &p->pbm_A);
+	pci_fixup_host_bridge_self(sabre_bus);
+	sabre_bus->self->sysdata = cookie;
+
+	sabre_root_bus = sabre_bus;
+
+	apb_init(p, sabre_bus);
+
+	sabres_scanned = 0;
+
+	/* Bind each child bus behind an APB segment to its PBM
+	 * and run the standard per-bus fixup passes.
+	 */
+	list_for_each_entry(pbus, &sabre_bus->children, node) {
+
+		if (pbus->number == p->pbm_A.pci_first_busno) {
+			pbm = &p->pbm_A;
+		} else if (pbus->number == p->pbm_B.pci_first_busno) {
+			pbm = &p->pbm_B;
+		} else
+			continue;
+
+		cookie = alloc_bridge_cookie(pbm);
+		pbus->self->sysdata = cookie;
+
+		sabres_scanned++;
+
+		pbus->sysdata = pbm;
+		pbm->pci_bus = pbus;
+		pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
+		pci_record_assignments(pbm, pbus);
+		pci_assign_unassigned(pbm, pbus);
+		pci_fixup_irq(pbm, pbus);
+		pci_determine_66mhz_disposition(pbm, pbus);
+		pci_setup_busmastering(pbm, pbus);
+	}
+
+	if (!sabres_scanned) {
+		/* Hummingbird, no APBs. */
+		pbm = &p->pbm_A;
+		sabre_bus->sysdata = pbm;
+		pbm->pci_bus = sabre_bus;
+		pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
+		pci_record_assignments(pbm, sabre_bus);
+		pci_assign_unassigned(pbm, sabre_bus);
+		pci_fixup_irq(pbm, sabre_bus);
+		pci_determine_66mhz_disposition(pbm, sabre_bus);
+		pci_setup_busmastering(pbm, sabre_bus);
+	}
+
+	/* Error reporting goes live only after the scan completes. */
+	sabre_register_error_handlers(p);
+}
+
+/* Initialize the SABRE's IOMMU: set up software state, flush the
+ * IOTLB, allocate the TSB (translation table), and program the
+ * control register for the requested TSB size.
+ *
+ * tsbsize is in units of 1K entries (only 64 or 128 accepted);
+ * dvma_offset is the base DVMA address; dma_mask is the DMA
+ * addressing limit devices on this controller may use.
+ */
+static void __init sabre_iommu_init(struct pci_controller_info *p,
+				    int tsbsize, unsigned long dvma_offset,
+				    u32 dma_mask)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long tsbbase, i, order;
+	u64 control;
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->iommu_cur_ctx = 0;
+
+	/* Register addresses. */
+	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
+	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
+	iommu->iommu_flush    = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH;
+	iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC;
+	/* Sabre's IOMMU lacks ctx flushing. */
+	iommu->iommu_ctxflush = 0;
+
+	/* Invalidate TLB Entries. */
+	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
+	control |= SABRE_IOMMUCTRL_DENAB;
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
+
+	/* Zero all 16 TLB tag/data diagnostic entries. */
+	for(i = 0; i < 16; i++) {
+		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0);
+		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
+	}
+
+	/* Leave diag mode enabled for full-flushing done
+	 * in pci_iommu.c
+	 */
+
+	/* NOTE(review): message prefix says PSYCHO_IOMMU but this is
+	 * the SABRE driver -- likely copy/paste from pci_psycho.c.
+	 */
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* The TSB holds tsbsize * 1024 eight-byte IOPTEs. */
+	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
+	if (!tsbbase) {
+		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+	iommu->page_table_map_base = dvma_offset;
+	iommu->dma_addr_mask = dma_mask;
+	pci_iommu_table_init(iommu, PAGE_SIZE << order);
+
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+
+	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
+	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
+	control |= SABRE_IOMMUCTRL_ENAB;
+	switch(tsbsize) {
+	case 64:
+		control |= SABRE_IOMMU_TSBSZ_64K;
+		iommu->page_table_sz_bits = 16;
+		break;
+	case 128:
+		control |= SABRE_IOMMU_TSBSZ_128K;
+		iommu->page_table_sz_bits = 17;
+		break;
+	default:
+		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
+		prom_halt();
+		break;
+	}
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map =
+		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+	for (i = 0; i < PBM_NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+}
+
+/* Scan an APB address-map register: each of its 8 bits enables one
+ * equal-sized chunk of the parent's IO or MEM space.  Return the
+ * first and last enabled chunk indices via FIRST_P/LAST_P; with no
+ * bits set, *first_p ends up 8 and *last_p 0 (matching the
+ * original open-coded loops).
+ */
+static void __init apb_calc_first_last(u8 map, unsigned long *first_p,
+				       unsigned long *last_p)
+{
+	unsigned long first, last, i;
+
+	first = 8;
+	last = 0;
+	for (i = 0; i < 8; i++) {
+		if ((map & (1 << i)) != 0) {
+			if (first > i)
+				first = i;
+			if (last < i)
+				last = i;
+		}
+	}
+
+	*first_p = first;
+	*last_p = last;
+}
+
+/* Compute this PBM's top-level IO and MEM resources from the APB
+ * address-map registers and register them with the generic
+ * resource layer.  IO chunks are 2MB (1<<21), MEM chunks are
+ * 512MB (1<<29).
+ */
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+						   struct pci_pbm_info *pbm)
+{
+	char *name = pbm->name;
+	unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
+	unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
+	unsigned int devfn;
+	unsigned long first, last;
+	u8 *addr, map;
+
+	sprintf(name, "SABRE%d PBM%c",
+		p->index,
+		(pbm == &p->pbm_A ? 'A' : 'B'));
+	pbm->io_space.name = pbm->mem_space.name = name;
+
+	/* PBM A is device 1 function 0, PBM B function 1. */
+	devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
+	addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
+	map = 0;
+	pci_config_read8(addr, &map);
+
+	apb_calc_first_last(map, &first, &last);
+	pbm->io_space.start = ibase + (first << 21UL);
+	pbm->io_space.end   = ibase + (last << 21UL) + ((1 << 21UL) - 1);
+	pbm->io_space.flags = IORESOURCE_IO;
+
+	addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
+	map = 0;
+	pci_config_read8(addr, &map);
+
+	apb_calc_first_last(map, &first, &last);
+	pbm->mem_space.start = mbase + (first << 29UL);
+	pbm->mem_space.end   = mbase + (last << 29UL) + ((1 << 29UL) - 1);
+	pbm->mem_space.flags = IORESOURCE_MEM;
+
+	if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
+		prom_printf("Cannot register PBM-%c's IO space.\n",
+			    (pbm == &p->pbm_A ? 'A' : 'B'));
+		prom_halt();
+	}
+	if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
+		prom_printf("Cannot register PBM-%c's MEM space.\n",
+			    (pbm == &p->pbm_A ? 'A' : 'B'));
+		prom_halt();
+	}
+
+	/* Register legacy regions if this PBM covers that area. */
+	if (pbm->io_space.start == ibase &&
+	    pbm->mem_space.start == mbase)
+		pci_register_legacy_regions(&pbm->io_space,
+					    &pbm->mem_space);
+}
+
+/* Walk the PROM device tree under SABRE_NODE looking for APB
+ * (SUNW,simba) bridges; fill in a pci_pbm_info for each one found.
+ * If none are present this is a Hummingbird system and a single
+ * PBM (A) is synthesized directly from the SABRE node.
+ */
+static void __init sabre_pbm_init(struct pci_controller_info *p, int sabre_node, u32 dma_begin)
+{
+	struct pci_pbm_info *pbm;
+	char namebuf[128];
+	u32 busrange[2];
+	int node, simbas_found;
+
+	simbas_found = 0;
+	node = prom_getchild(sabre_node);
+	while ((node = prom_searchsiblings(node, "pci")) != 0) {
+		int err;
+
+		/* Skip "pci" nodes whose model is not SUNW,simba. */
+		err = prom_getproperty(node, "model", namebuf, sizeof(namebuf));
+		if ((err <= 0) || strncmp(namebuf, "SUNW,simba", err))
+			goto next_pci;
+
+		err = prom_getproperty(node, "bus-range",
+				       (char *)&busrange[0], sizeof(busrange));
+		if (err == 0 || err == -1) {
+			prom_printf("APB: Error, cannot get PCI bus-range.\n");
+			prom_halt();
+		}
+
+		simbas_found++;
+		/* A simba whose bus range starts at 1 is PBM B. */
+		if (busrange[0] == 1)
+			pbm = &p->pbm_B;
+		else
+			pbm = &p->pbm_A;
+		pbm->chip_type = PBM_CHIP_TYPE_SABRE;
+		pbm->parent = p;
+		pbm->prom_node = node;
+		pbm->pci_first_slot = 1;
+		pbm->pci_first_busno = busrange[0];
+		pbm->pci_last_busno = busrange[1];
+
+		/* Cache the PROM "ranges" property if present. */
+		prom_getstring(node, "name", pbm->prom_name, sizeof(pbm->prom_name));
+		err = prom_getproperty(node, "ranges",
+				       (char *)pbm->pbm_ranges,
+				       sizeof(pbm->pbm_ranges));
+		if (err != -1)
+			pbm->num_pbm_ranges =
+				(err / sizeof(struct linux_prom_pci_ranges));
+		else
+			pbm->num_pbm_ranges = 0;
+
+		/* An interrupt-map requires a matching mask. */
+		err = prom_getproperty(node, "interrupt-map",
+				       (char *)pbm->pbm_intmap,
+				       sizeof(pbm->pbm_intmap));
+		if (err != -1) {
+			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+			err = prom_getproperty(node, "interrupt-map-mask",
+					       (char *)&pbm->pbm_intmask,
+					       sizeof(pbm->pbm_intmask));
+			if (err == -1) {
+				prom_printf("APB: Fatal error, no interrupt-map-mask.\n");
+				prom_halt();
+			}
+		} else {
+			pbm->num_pbm_intmap = 0;
+			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+		}
+
+		pbm_register_toplevel_resources(p, pbm);
+
+	next_pci:
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+	if (simbas_found == 0) {
+		int err;
+
+		/* No APBs underneath, probably this is a hummingbird
+		 * system.
+		 */
+		pbm = &p->pbm_A;
+		pbm->parent = p;
+		pbm->prom_node = sabre_node;
+		pbm->pci_first_busno = p->pci_first_busno;
+		pbm->pci_last_busno = p->pci_last_busno;
+
+		prom_getstring(sabre_node, "name", pbm->prom_name, sizeof(pbm->prom_name));
+		err = prom_getproperty(sabre_node, "ranges",
+				       (char *) pbm->pbm_ranges,
+				       sizeof(pbm->pbm_ranges));
+		if (err != -1)
+			pbm->num_pbm_ranges =
+				(err / sizeof(struct linux_prom_pci_ranges));
+		else
+			pbm->num_pbm_ranges = 0;
+
+		err = prom_getproperty(sabre_node, "interrupt-map",
+				       (char *) pbm->pbm_intmap,
+				       sizeof(pbm->pbm_intmap));
+
+		if (err != -1) {
+			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+			err = prom_getproperty(sabre_node, "interrupt-map-mask",
+					       (char *)&pbm->pbm_intmask,
+					       sizeof(pbm->pbm_intmask));
+			if (err == -1) {
+				prom_printf("Hummingbird: Fatal error, no interrupt-map-mask.\n");
+				prom_halt();
+			}
+		} else {
+			pbm->num_pbm_intmap = 0;
+			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+		}
+
+
+		sprintf(pbm->name, "SABRE%d PBM%c", p->index,
+			(pbm == &p->pbm_A ? 'A' : 'B'));
+		pbm->io_space.name = pbm->mem_space.name = pbm->name;
+
+		/* Hack up top-level resources. */
+		pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
+		pbm->io_space.end   = pbm->io_space.start + (1UL << 24) - 1UL;
+		pbm->io_space.flags = IORESOURCE_IO;
+
+		pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE;
+		pbm->mem_space.end   = pbm->mem_space.start + (unsigned long)dma_begin - 1UL;
+		pbm->mem_space.flags = IORESOURCE_MEM;
+
+		if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
+			prom_printf("Cannot register Hummingbird's IO space.\n");
+			prom_halt();
+		}
+		if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
+			prom_printf("Cannot register Hummingbird's MEM space.\n");
+			prom_halt();
+		}
+
+		pci_register_legacy_regions(&pbm->io_space,
+					    &pbm->mem_space);
+	}
+}
+
+/* Top-level SABRE probe/init: detect Hummingbird variants, allocate
+ * controller and IOMMU state, map registers from the PROM "reg"
+ * property, clear interrupts, size the IOMMU from "virtual-dma",
+ * and finally probe for APBs underneath.
+ */
+void __init sabre_init(int pnode, char *model_name)
+{
+	struct linux_prom64_registers pr_regs[2];
+	struct pci_controller_info *p;
+	struct pci_iommu *iommu;
+	int tsbsize, err;
+	u32 busrange[2];
+	u32 vdma[2];
+	u32 upa_portid, dma_mask;
+	u64 clear_irq;
+
+	/* Hummingbird detection: either the model name is the
+	 * pci108e,a001 form directly, or a SUNW,sabre node whose
+	 * "compatible" property (or the CPU being an UltraSPARC-IIe)
+	 * reveals it.
+	 */
+	hummingbird_p = 0;
+	if (!strcmp(model_name, "pci108e,a001"))
+		hummingbird_p = 1;
+	else if (!strcmp(model_name, "SUNW,sabre")) {
+		char compat[64];
+
+		if (prom_getproperty(pnode, "compatible",
+				     compat, sizeof(compat)) > 0 &&
+		    !strcmp(compat, "pci108e,a001")) {
+			hummingbird_p = 1;
+		} else {
+			int cpu_node;
+
+			/* Of course, Sun has to encode things a thousand
+			 * different ways, inconsistently.
+			 */
+			cpu_find_by_instance(0, &cpu_node, NULL);
+			if (prom_getproperty(cpu_node, "name",
+					     compat, sizeof(compat)) > 0 &&
+			    !strcmp(compat, "SUNW,UltraSPARC-IIe"))
+				hummingbird_p = 1;
+		}
+	}
+
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p) {
+		prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
+		prom_halt();
+	}
+	memset(p, 0, sizeof(*p));
+
+	/* Both PBMs share a single IOMMU on SABRE. */
+	iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_A.iommu = p->pbm_B.iommu = iommu;
+
+	upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
+
+	/* Link into the global controller list. */
+	p->next = pci_controller_root;
+	pci_controller_root = p;
+
+	p->pbm_A.portid = upa_portid;
+	p->pbm_B.portid = upa_portid;
+	p->index = pci_num_controllers++;
+	p->pbms_same_domain = 1;
+	p->scan_bus = sabre_scan_bus;
+	p->irq_build = sabre_irq_build;
+	p->base_address_update = sabre_base_address_update;
+	p->resource_adjust = sabre_resource_adjust;
+	p->pci_ops = &sabre_ops;
+
+	/*
+	 * Map in SABRE register set and report the presence of this SABRE.
+	 */
+	err = prom_getproperty(pnode, "reg",
+			       (char *)&pr_regs[0], sizeof(pr_regs));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get U2P registers "
+			    "from PROM.\n");
+		prom_halt();
+	}
+
+	/*
+	 * First REG in property is base of entire SABRE register space.
+	 */
+	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
+	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
+	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
+
+	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
+	       p->pbm_A.controller_regs, pci_dma_wsync);
+
+	/* Clear interrupts */
+
+	/* PCI first */
+	for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
+		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
+
+	/* Then OBIO */
+	for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
+		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
+
+	/* Error interrupts are enabled later after the bus scan. */
+	sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL,
+		    (SABRE_PCICTRL_MRLEN   | SABRE_PCICTRL_SERR |
+		     SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
+
+	/* Now map in PCI config space for entire SABRE. */
+	p->pbm_A.config_space = p->pbm_B.config_space =
+		(p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
+	printk("SABRE: Shared PCI config space at %016lx\n",
+	       p->pbm_A.config_space);
+
+	err = prom_getproperty(pnode, "virtual-dma",
+			       (char *)&vdma[0], sizeof(vdma));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get virtual-dma property "
+			    "from PROM.\n");
+		prom_halt();
+	}
+
+	/* vdma[0] is the DVMA base, vdma[1] the size; pick the
+	 * matching TSB size and DMA address mask.
+	 */
+	dma_mask = vdma[0];
+	switch(vdma[1]) {
+		case 0x20000000:
+			dma_mask |= 0x1fffffff;
+			tsbsize = 64;
+			break;
+		case 0x40000000:
+			dma_mask |= 0x3fffffff;
+			tsbsize = 128;
+			break;
+
+		case 0x80000000:
+			dma_mask |= 0x7fffffff;
+			tsbsize = 128;
+			break;
+		default:
+			prom_printf("SABRE: strange virtual-dma size.\n");
+			prom_halt();
+	}
+
+	sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
+
+	printk("SABRE: DVMA at %08x [%08x]\n", vdma[0], vdma[1]);
+
+	err = prom_getproperty(pnode, "bus-range",
+				       (char *)&busrange[0], sizeof(busrange));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get PCI bus-range "
+			    " from PROM.\n");
+		prom_halt();
+	}
+
+	p->pci_first_busno = busrange[0];
+	p->pci_last_busno = busrange[1];
+
+	/*
+	 * Look for APB underneath.
+	 */
+	sabre_pbm_init(p, pnode, vdma[0]);
+}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
new file mode 100644
index 0000000..e93fcad
--- /dev/null
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -0,0 +1,2187 @@
+/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
+ * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
+ *
+ * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/upa.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All SCHIZO registers are 64-bits.  The following accessor
+ * routines are how they are accessed.  The REG parameter
+ * is a physical address.
+ */
+#define schizo_read(__reg) \
+({	u64 __ret; \
+	__asm__ __volatile__("ldxa [%1] %2, %0" \
+			     : "=r" (__ret) \
+			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory"); \
+	__ret; \
+})
+#define schizo_write(__reg, __val) \
+	__asm__ __volatile__("stxa %0, [%1] %2" \
+			     : /* no outputs */ \
+			     : "r" (__val), "r" (__reg), \
+			       "i" (ASI_PHYS_BYPASS_EC_E) \
+			     : "memory")
+
+/* This is a convention that at least Excalibur and Merlin
+ * follow.  I suppose the SCHIZO used in Starcat and friends
+ * will do similar.
+ *
+ * The only way I could see this changing is if the newlink
+ * block requires more space in Schizo's address space than
+ * they predicted, thus requiring an address space reorg when
+ * the newer Schizo is taped out.
+ */
+
+/* Streaming buffer control register. */
+#define SCHIZO_STRBUF_CTRL_LPTR    0x00000000000000f0UL /* LRU Lock Pointer */
+#define SCHIZO_STRBUF_CTRL_LENAB   0x0000000000000008UL /* LRU Lock Enable */
+#define SCHIZO_STRBUF_CTRL_RRDIS   0x0000000000000004UL /* Rerun Disable */
+#define SCHIZO_STRBUF_CTRL_DENAB   0x0000000000000002UL /* Diagnostic Mode Enable */
+#define SCHIZO_STRBUF_CTRL_ENAB    0x0000000000000001UL /* Streaming Buffer Enable */
+
+/* IOMMU control register. */
+#define SCHIZO_IOMMU_CTRL_RESV     0xfffffffff9000000UL /* Reserved                      */
+#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status      */
+#define SCHIZO_IOMMU_CTRL_XLTEERR  0x0000000001000000UL /* Translation Error encountered */
+#define SCHIZO_IOMMU_CTRL_LCKEN    0x0000000000800000UL /* Enable translation locking    */
+#define SCHIZO_IOMMU_CTRL_LCKPTR   0x0000000000780000UL /* Translation lock pointer      */
+#define SCHIZO_IOMMU_CTRL_TSBSZ    0x0000000000070000UL /* TSB Size                      */
+#define SCHIZO_IOMMU_TSBSZ_1K      0x0000000000000000UL /* TSB Table 1024 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_2K      0x0000000000010000UL /* TSB Table 2048 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_4K      0x0000000000020000UL /* TSB Table 4096 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_8K      0x0000000000030000UL /* TSB Table 8192 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_16K     0x0000000000040000UL /* TSB Table 16k 8-byte entries  */
+#define SCHIZO_IOMMU_TSBSZ_32K     0x0000000000050000UL /* TSB Table 32k 8-byte entries  */
+#define SCHIZO_IOMMU_TSBSZ_64K     0x0000000000060000UL /* TSB Table 64k 8-byte entries  */
+#define SCHIZO_IOMMU_TSBSZ_128K    0x0000000000070000UL /* TSB Table 128k 8-byte entries */
+#define SCHIZO_IOMMU_CTRL_RESV2    0x000000000000fff8UL /* Reserved                      */
+#define SCHIZO_IOMMU_CTRL_TBWSZ    0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
+#define SCHIZO_IOMMU_CTRL_DENAB    0x0000000000000002UL /* Diagnostic mode enable        */
+#define SCHIZO_IOMMU_CTRL_ENAB     0x0000000000000001UL /* IOMMU Enable                  */
+
+/* Schizo config space address format is nearly identical to
+ * that of PSYCHO:
+ *
+ *  32             24 23 16 15    11 10       8 7   2  1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define SCHIZO_CONFIG_BASE(PBM)	((PBM)->config_space)
+#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG)	\
+	(((unsigned long)(BUS)   << 16) |	\
+	 ((unsigned long)(DEVFN) << 8)  |	\
+	 ((unsigned long)(REG)))
+
+/* Build the physical address used to access config space for
+ * (bus, devfn, where) under PBM, or NULL if PBM is absent.  The
+ * bus number is made PBM-relative before encoding.
+ */
+static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
+				      unsigned char bus,
+				      unsigned int devfn,
+				      int where)
+{
+	unsigned long cfg_addr;
+
+	if (pbm == NULL)
+		return NULL;
+
+	cfg_addr = SCHIZO_CONFIG_BASE(pbm);
+	cfg_addr |= SCHIZO_CONFIG_ENCODE(bus - pbm->pci_first_busno,
+					 devfn, where);
+
+	return (void *) cfg_addr;
+}
+
+/* Just make sure the bus number is in range.  */
+/* Return non-zero when BUS falls outside the window of bus
+ * numbers this PBM is responsible for.
+ */
+static int schizo_out_of_range(struct pci_pbm_info *pbm,
+			       unsigned char bus,
+			       unsigned char devfn)
+{
+	return (bus < pbm->pci_first_busno ||
+		bus > pbm->pci_last_busno) ? 1 : 0;
+}
+
+/* SCHIZO PCI configuration space accessors. */
+
+/* Read SIZE bytes of config space at WHERE for (bus, devfn).
+ * Inaccessible or out-of-range targets read back as all-ones,
+ * like an empty slot; misaligned accesses are logged and skipped.
+ */
+static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+			       int where, int size, u32 *value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+	u16 tmp16;
+	u8 tmp8;
+
+	/* Preload the all-ones "no device" value. */
+	if (size == 1)
+		*value = 0xff;
+	else if (size == 2)
+		*value = 0xffff;
+	else if (size == 4)
+		*value = 0xffffffff;
+
+	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (addr == NULL || schizo_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	if (size == 1) {
+		pci_config_read8((u8 *)addr, &tmp8);
+		*value = tmp8;
+	} else if (size == 2) {
+		if (where & 0x01) {
+			printk("pci_read_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read16((u16 *)addr, &tmp16);
+		*value = tmp16;
+	} else if (size == 4) {
+		if (where & 0x03) {
+			printk("pci_read_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read32(addr, value);
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write SIZE bytes of VALUE into config space at WHERE for
+ * (bus, devfn).  Out-of-range targets are silently ignored;
+ * misaligned accesses are logged and dropped.
+ */
+static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				int where, int size, u32 value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+
+	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (addr == NULL || schizo_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	if (size == 1) {
+		pci_config_write8((u8 *)addr, value);
+	} else if (size == 2) {
+		if (where & 0x01) {
+			printk("pci_write_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write16((u16 *)addr, value);
+	} else if (size == 4) {
+		if (where & 0x03) {
+			printk("pci_write_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write32(addr, value);
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors handed to the generic PCI layer. */
+static struct pci_ops schizo_ops = {
+	.read =		schizo_read_pci_cfg,
+	.write =	schizo_write_pci_cfg,
+};
+
+/* SCHIZO interrupt mapping support.  Unlike Psycho, for this controller the
+ * imap/iclr registers are per-PBM.
+ */
+#define SCHIZO_IMAP_BASE	0x1000UL
+#define SCHIZO_ICLR_BASE	0x1400UL
+
+/* One 8-byte IMAP register per INO, starting at SCHIZO_IMAP_BASE. */
+static unsigned long schizo_imap_offset(unsigned long ino)
+{
+	return (ino << 3) + SCHIZO_IMAP_BASE;
+}
+
+/* One 8-byte ICLR register per INO, starting at SCHIZO_ICLR_BASE. */
+static unsigned long schizo_iclr_offset(unsigned long ino)
+{
+	return (ino << 3) + SCHIZO_ICLR_BASE;
+}
+
+/* PCI SCHIZO INO number to Sparc PIL level.  This table only matters for
+ * INOs which will not have an associated PCI device struct, ie. onboard
+ * EBUS devices and PCI controller internal error interrupts.
+ */
+/* Indexed by INO (0x00-0x3f).  A zero entry means "no fixed PIL";
+ * schizo_ino_to_pil() then derives one from the device's PCI class.
+ */
+static unsigned char schizo_pil_table[] = {
+/*0x00*/0, 0, 0, 0,	/* PCI slot 0  Int A, B, C, D	*/
+/*0x04*/0, 0, 0, 0,	/* PCI slot 1  Int A, B, C, D	*/
+/*0x08*/0, 0, 0, 0,	/* PCI slot 2  Int A, B, C, D	*/
+/*0x0c*/0, 0, 0, 0,	/* PCI slot 3  Int A, B, C, D	*/
+/*0x10*/0, 0, 0, 0,	/* PCI slot 4  Int A, B, C, D	*/
+/*0x14*/0, 0, 0, 0,	/* PCI slot 5  Int A, B, C, D	*/
+/*0x18*/4,		/* SCSI				*/
+/*0x19*/4,		/* second SCSI			*/
+/*0x1a*/0,		/* UNKNOWN			*/
+/*0x1b*/0,		/* UNKNOWN			*/
+/*0x1c*/8,		/* Parallel			*/
+/*0x1d*/5,		/* Ethernet			*/
+/*0x1e*/8,		/* Firewire-1394		*/
+/*0x1f*/9,		/* USB				*/
+/*0x20*/13,		/* Audio Record			*/
+/*0x21*/14,		/* Audio Playback		*/
+/*0x22*/12,		/* Serial			*/
+/*0x23*/4,		/* EBUS I2C 			*/
+/*0x24*/10,		/* RTC Clock			*/
+/*0x25*/11,		/* Floppy			*/
+/*0x26*/0,		/* UNKNOWN			*/
+/*0x27*/0,		/* UNKNOWN			*/
+/*0x28*/0,		/* UNKNOWN			*/
+/*0x29*/0,		/* UNKNOWN			*/
+/*0x2a*/10,		/* UPA 1			*/
+/*0x2b*/10,		/* UPA 2			*/
+/*0x2c*/0,		/* UNKNOWN			*/
+/*0x2d*/0,		/* UNKNOWN			*/
+/*0x2e*/0,		/* UNKNOWN			*/
+/*0x2f*/0,		/* UNKNOWN			*/
+/*0x30*/15,		/* Uncorrectable ECC		*/
+/*0x31*/15,		/* Correctable ECC		*/
+/*0x32*/15,		/* PCI Bus A Error		*/
+/*0x33*/15,		/* PCI Bus B Error		*/
+/*0x34*/15,		/* Safari Bus Error		*/
+/*0x35*/0,		/* Reserved			*/
+/*0x36*/0,		/* Reserved			*/
+/*0x37*/0,		/* Reserved			*/
+/*0x38*/0,		/* Reserved for NewLink		*/
+/*0x39*/0,		/* Reserved for NewLink		*/
+/*0x3a*/0,		/* Reserved for NewLink		*/
+/*0x3b*/0,		/* Reserved for NewLink		*/
+/*0x3c*/0,		/* Reserved for NewLink		*/
+/*0x3d*/0,		/* Reserved for NewLink		*/
+/*0x3e*/0,		/* Reserved for NewLink		*/
+/*0x3f*/0,		/* Reserved for NewLink		*/
+};
+
+/* Map a SCHIZO INO to a Sparc PIL level.  Fixed assignments come
+ * from schizo_pil_table; a zero table entry means no fixed PIL, in
+ * which case one is chosen from the PCI device's base class
+ * (default 4 when no device is associated).
+ */
+static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int ret;
+
+	/* The RIO USB controller always gets a fixed PIL regardless
+	 * of the table entry for its INO.
+	 */
+	if (pdev &&
+	    pdev->vendor == PCI_VENDOR_ID_SUN &&
+	    pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
+		return 9;
+
+	ret = schizo_pil_table[ino];
+	if (ret == 0 && pdev == NULL) {
+		ret = 4;
+	} else if (ret == 0) {
+		/* Choose a PIL from the device's PCI base class. */
+		switch ((pdev->class >> 16) & 0xff) {
+		case PCI_BASE_CLASS_STORAGE:
+			ret = 4;
+			break;
+
+		case PCI_BASE_CLASS_NETWORK:
+			ret = 6;
+			break;
+
+		case PCI_BASE_CLASS_DISPLAY:
+			ret = 9;
+			break;
+
+		case PCI_BASE_CLASS_MULTIMEDIA:
+		case PCI_BASE_CLASS_MEMORY:
+		case PCI_BASE_CLASS_BRIDGE:
+		case PCI_BASE_CLASS_SERIAL:
+			ret = 10;
+			break;
+
+		default:
+			ret = 4;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Build an IRQ bucket for the given SCHIZO/TOMATILLO INO and
+ * return the virtual irq number.  IMAP/ICLR registers on this
+ * controller are per-PBM (see the comment above SCHIZO_IMAP_BASE).
+ */
+static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
+				     struct pci_dev *pdev,
+				     unsigned int ino)
+{
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr;
+	unsigned long imap_off, iclr_off;
+	int pil, ign_fixup;
+
+	ino &= PCI_IRQ_INO;
+	imap_off = schizo_imap_offset(ino);
+
+	/* Now build the IRQ bucket. */
+	pil = schizo_ino_to_pil(pdev, ino);
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	/* NOTE(review): the +4 offsets into each 8-byte IMAP/ICLR
+	 * register, presumably addressing its low 32-bit word --
+	 * hardware-specific, confirm against the SCHIZO manual.
+	 */
+	imap = pbm->pbm_regs + imap_off;
+	imap += 4;
+
+	iclr_off = schizo_iclr_offset(ino);
+	iclr = pbm->pbm_regs + iclr_off;
+	iclr += 4;
+
+	/* On Schizo, no inofixup occurs.  This is because each
+	 * INO has it's own IMAP register.  On Psycho and Sabre
+	 * there is only one IMAP register for each PCI slot even
+	 * though four different INOs can be generated by each
+	 * PCI slot.
+	 *
+	 * But, for JBUS variants (essentially, Tomatillo), we have
+	 * to fixup the lowest bit of the interrupt group number.
+	 */
+	ign_fixup = 0;
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+		if (pbm->portid & 1)
+			ign_fixup = (1 << 6);
+	}
+
+	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	return __irq(bucket);
+}
+
+/* SCHIZO error handling support. */
+enum schizo_error_type {
+	UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
+};
+
+/* Snapshot buffers for streaming-cache diagnostic state.  Shared by
+ * all controllers; stc_buf_lock serializes dump-and-print sequences.
+ */
+static DEFINE_SPINLOCK(stc_buf_lock);
+static unsigned long stc_error_buf[128];
+static unsigned long stc_tag_buf[16];
+static unsigned long stc_line_buf[16];
+
+/* Error interrupt INO assignments (match schizo_pil_table entries). */
+#define SCHIZO_UE_INO		0x30 /* Uncorrectable ECC error */
+#define SCHIZO_CE_INO		0x31 /* Correctable ECC error */
+#define SCHIZO_PCIERR_A_INO	0x32 /* PBM A PCI bus error */
+#define SCHIZO_PCIERR_B_INO	0x33 /* PBM B PCI bus error */
+#define SCHIZO_SERR_INO		0x34 /* Safari interface error */
+
+/* Return the PBM whose ino_bitmap claims the given INO.  If neither
+ * PBM claims it (which indicates a probe/firmware problem), log both
+ * bitmaps and fall back to PBM A.
+ */
+struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
+{
+	ino &= IMAP_INO;
+	if (p->pbm_A.ino_bitmap & (1UL << ino))
+		return &p->pbm_A;
+	if (p->pbm_B.ino_bitmap & (1UL << ino))
+		return &p->pbm_B;
+
+	/* Terminate the first report with a newline so the two
+	 * messages do not run together on the console.
+	 */
+	printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps "
+	       "PBM_A[%016lx] PBM_B[%016lx]\n",
+	       p->index, ino,
+	       p->pbm_A.ino_bitmap,
+	       p->pbm_B.ino_bitmap);
+	printk("PCI%d: Using PBM_A, report this problem immediately.\n",
+	       p->index);
+
+	return &p->pbm_A;
+}
+
+/* ACK the sibling PBM's copy of a chip-wide error interrupt.  The
+ * per-PBM PCI bus error interrupts are excluded because they are not
+ * chip-wide and must not be cleared on the other bus.
+ */
+static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
+{
+	struct pci_pbm_info *pbm;
+	struct ino_bucket *bucket;
+	unsigned long iclr;
+
+	/* Do not clear the interrupt for the other PCI bus.
+	 *
+	 * This "ACK both PBM IRQs" only needs to be performed
+	 * for chip-wide error interrupts.
+	 */
+	if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
+	    (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
+		return;
+
+	/* Swap to the sibling PBM of the one that reported. */
+	pbm = pbm_for_ino(p, irq);
+	if (pbm == &p->pbm_A)
+		pbm = &p->pbm_B;
+	else
+		pbm = &p->pbm_A;
+
+	irq = schizo_irq_build(pbm, NULL,
+			       (pbm->portid << 6) | (irq & IMAP_INO));
+	bucket = __bucket(irq);
+	iclr = bucket->iclr;
+
+	upa_writel(ICLR_IDLE, iclr);
+}
+
+/* Streaming cache diagnostic register blocks (offsets from pbm_regs). */
+#define SCHIZO_STC_ERR	0xb800UL /* --> 0xba00 */
+#define SCHIZO_STC_TAG	0xba00UL /* --> 0xba80 */
+#define SCHIZO_STC_LINE	0xbb00UL /* --> 0xbb80 */
+
+#define SCHIZO_STCERR_WRITE	0x2UL
+#define SCHIZO_STCERR_READ	0x1UL
+
+#define SCHIZO_STCTAG_PPN	0x3fffffff00000000UL
+#define SCHIZO_STCTAG_VPN	0x00000000ffffe000UL
+#define SCHIZO_STCTAG_VALID	0x8000000000000000UL
+#define SCHIZO_STCTAG_READ	0x4000000000000000UL
+
+#define SCHIZO_STCLINE_LINDX	0x0000000007800000UL
+#define SCHIZO_STCLINE_SPTR	0x000000000007e000UL
+#define SCHIZO_STCLINE_LADDR	0x0000000000001fc0UL
+#define SCHIZO_STCLINE_EPTR	0x000000000000003fUL
+#define SCHIZO_STCLINE_VALID	0x0000000000600000UL
+#define SCHIZO_STCLINE_FOFN	0x0000000000180000UL
+
+/* Dump this PBM's streaming-cache error/tag/line state to the console.
+ * The chip is briefly placed into diagnostic mode to snapshot the
+ * registers into the shared stc_*_buf arrays (under stc_buf_lock),
+ * then restored before any printing is done.
+ */
+static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
+					 enum schizo_error_type type)
+{
+	struct pci_strbuf *strbuf = &pbm->stc;
+	unsigned long regbase = pbm->pbm_regs;
+	unsigned long err_base, tag_base, line_base;
+	u64 control;
+	int i;
+
+	err_base = regbase + SCHIZO_STC_ERR;
+	tag_base = regbase + SCHIZO_STC_TAG;
+	line_base = regbase + SCHIZO_STC_LINE;
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the
+	 * streaming buffer into diagnostic mode to probe
+	 * its tags and error status, we _must_ clear all
+	 * of the line tag valid bits before re-enabling
+	 * the streaming buffer.  If any dirty data lives
+	 * in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach
+	 * main memory.
+	 */
+	control = schizo_read(strbuf->strbuf_control);
+	schizo_write(strbuf->strbuf_control,
+		     (control | SCHIZO_STRBUF_CTRL_DENAB));
+	for (i = 0; i < 128; i++) {
+		unsigned long val;
+
+		val = schizo_read(err_base + (i * 8UL));
+		schizo_write(err_base + (i * 8UL), 0UL);
+		stc_error_buf[i] = val;
+	}
+	/* 16 lines; tags and line state read together, then cleared. */
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
+		stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
+		schizo_write(tag_base + (i * 8UL), 0UL);
+		schizo_write(line_base + (i * 8UL), 0UL);
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	schizo_write(strbuf->strbuf_control, control);
+
+	/* 8 error words per line; report only lines that saw an error. */
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			unsigned long errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
+				       pbm->name,
+				       j,
+				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
+				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			unsigned long tagval = stc_tag_buf[i];
+			unsigned long lineval = stc_line_buf[i];
+			printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
+			       (tagval & SCHIZO_STCTAG_VPN),
+			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
+
+			/* XXX Should spit out per-bank error information... -DaveM */
+			printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+			       "V(%d)FOFN(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
+			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
+			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
+			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
+			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
+		}
+	}
+
+	spin_unlock(&stc_buf_lock);
+}
+
+/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
+ * controller level errors.
+ */
+
+#define SCHIZO_IOMMU_TAG	0xa580UL
+#define SCHIZO_IOMMU_DATA	0xa600UL
+
+#define SCHIZO_IOMMU_TAG_CTXT	0x0000001ffe000000UL
+#define SCHIZO_IOMMU_TAG_ERRSTS	0x0000000001800000UL
+#define SCHIZO_IOMMU_TAG_ERR	0x0000000000400000UL
+#define SCHIZO_IOMMU_TAG_WRITE	0x0000000000200000UL
+#define SCHIZO_IOMMU_TAG_STREAM	0x0000000000100000UL
+#define SCHIZO_IOMMU_TAG_SIZE	0x0000000000080000UL
+#define SCHIZO_IOMMU_TAG_VPAGE	0x000000000007ffffUL
+
+#define SCHIZO_IOMMU_DATA_VALID	0x0000000100000000UL
+#define SCHIZO_IOMMU_DATA_CACHE	0x0000000040000000UL
+#define SCHIZO_IOMMU_DATA_PPAGE	0x000000003fffffffUL
+
+/* If this PBM's IOMMU has latched a translation error, decode and log
+ * it, snapshot the 16 TLB entries in diagnostic mode, and print every
+ * entry whose tag carries the error bit.  Also checks the streaming
+ * cache when it is enabled.  Runs with the IOMMU lock held and IRQs
+ * disabled.
+ */
+static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
+					 enum schizo_error_type type)
+{
+	struct pci_iommu *iommu = pbm->iommu;
+	unsigned long iommu_tag[16];
+	unsigned long iommu_data[16];
+	unsigned long flags;
+	u64 control;
+	int i;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	control = schizo_read(iommu->iommu_control);
+	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
+		unsigned long base;
+		char *type_string;
+
+		/* Clear the error encountered bit. */
+		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
+		schizo_write(iommu->iommu_control, control);
+
+		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+		case 0:
+			type_string = "Protection Error";
+			break;
+		case 1:
+			type_string = "Invalid Error";
+			break;
+		case 2:
+			type_string = "TimeOut Error";
+			break;
+		case 3:
+		default:
+			type_string = "ECC Error";
+			break;
+		}
+		printk("%s: IOMMU Error, type[%s]\n",
+		       pbm->name, type_string);
+
+		/* Put the IOMMU into diagnostic mode and probe
+		 * its TLB for entries with error status.
+		 *
+		 * It is very possible for another DVMA to occur
+		 * while we do this probe, and corrupt the system
+		 * further.  But we are so screwed at this point
+		 * that we are likely to crash hard anyways, so
+		 * get as much diagnostic information to the
+		 * console as we can.
+		 */
+		schizo_write(iommu->iommu_control,
+			     control | SCHIZO_IOMMU_CTRL_DENAB);
+
+		base = pbm->pbm_regs;
+
+		for (i = 0; i < 16; i++) {
+			iommu_tag[i] =
+				schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
+			iommu_data[i] =
+				schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));
+
+			/* Now clear out the entry. */
+			schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
+			schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
+		}
+
+		/* Leave diagnostic mode. */
+		schizo_write(iommu->iommu_control, control);
+
+		for (i = 0; i < 16; i++) {
+			unsigned long tag, data;
+
+			tag = iommu_tag[i];
+			if (!(tag & SCHIZO_IOMMU_TAG_ERR))
+				continue;
+
+			data = iommu_data[i];
+			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
+			case 0:
+				type_string = "Protection Error";
+				break;
+			case 1:
+				type_string = "Invalid Error";
+				break;
+			case 2:
+				type_string = "TimeOut Error";
+				break;
+			case 3:
+			default:
+				type_string = "ECC Error";
+				break;
+			}
+			printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
+			       "sz(%dK) vpg(%08lx)]\n",
+			       pbm->name, i, type_string,
+			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
+			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
+			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
+			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
+			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
+			printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
+			       pbm->name, i,
+			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
+			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
+			       (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
+		}
+	}
+	if (pbm->stc.strbuf_enabled)
+		__schizo_check_stc_error_pbm(pbm, type);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Interrogate both PBMs' IOMMUs for latched translation errors. */
+static void schizo_check_iommu_error(struct pci_controller_info *p,
+				     enum schizo_error_type type)
+{
+	schizo_check_iommu_error_pbm(&p->pbm_A, type);
+	schizo_check_iommu_error_pbm(&p->pbm_B, type);
+}
+
+/* Uncorrectable ECC error status gathering. */
+#define SCHIZO_UE_AFSR	0x10030UL
+#define SCHIZO_UE_AFAR	0x10038UL
+
+#define SCHIZO_UEAFSR_PPIO	0x8000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_PDRD	0x4000000000000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_PDWR	0x2000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_SPIO	0x1000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_SDMA	0x0800000000000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_ERRPNDG	0x0300000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_BMSK	0x000003ff00000000UL /* Safari */
+#define SCHIZO_UEAFSR_QOFF	0x00000000c0000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_AID	0x000000001f000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_PARTIAL	0x0000000000800000UL /* Safari */
+#define SCHIZO_UEAFSR_OWNEDIN	0x0000000000400000UL /* Safari */
+#define SCHIZO_UEAFSR_MTAGSYND	0x00000000000f0000UL /* Safari */
+#define SCHIZO_UEAFSR_MTAG	0x000000000000e000UL /* Safari */
+#define SCHIZO_UEAFSR_ECCSYND	0x00000000000001ffUL /* Safari */
+
+/* Uncorrectable ECC error interrupt handler: latch and clear the UE
+ * AFSR/AFAR state, log it, then interrogate both IOMMUs.  Returns
+ * IRQ_NONE when no primary/secondary UE bits were set (shared IRQ).
+ */
+static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFSR;
+	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported, limit;
+
+	/* Latch uncorrectable error status. */
+	afar = schizo_read(afar_reg);
+
+	/* If either of the error pending bits are set in the
+	 * AFSR, the error status is being actively updated by
+	 * the hardware and we must re-read to get a clean value.
+	 */
+	limit = 1000;
+	do {
+		afsr = schizo_read(afsr_reg);
+	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
+
+	/* Clear the primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
+		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
+	if (!error_bits)
+		return IRQ_NONE;
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PCI%d: Uncorrectable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
+	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
+	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
+	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
+	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
+	printk("PCI%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("PCI%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SCHIZO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SCHIZO_UEAFSR_SDMA) {
+		reported++;
+		printk("(DMA)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	schizo_check_iommu_error(p, UE_ERR);
+
+	schizo_clear_other_err_intr(p, irq);
+
+	return IRQ_HANDLED;
+}
+
+#define SCHIZO_CE_AFSR	0x10040UL
+#define SCHIZO_CE_AFAR	0x10048UL
+
+#define SCHIZO_CEAFSR_PPIO	0x8000000000000000UL
+#define SCHIZO_CEAFSR_PDRD	0x4000000000000000UL
+#define SCHIZO_CEAFSR_PDWR	0x2000000000000000UL
+#define SCHIZO_CEAFSR_SPIO	0x1000000000000000UL
+#define SCHIZO_CEAFSR_SDMA	0x0800000000000000UL
+#define SCHIZO_CEAFSR_ERRPNDG	0x0300000000000000UL
+#define SCHIZO_CEAFSR_BMSK	0x000003ff00000000UL
+#define SCHIZO_CEAFSR_QOFF	0x00000000c0000000UL
+#define SCHIZO_CEAFSR_AID	0x000000001f000000UL
+#define SCHIZO_CEAFSR_PARTIAL	0x0000000000800000UL
+#define SCHIZO_CEAFSR_OWNEDIN	0x0000000000400000UL
+#define SCHIZO_CEAFSR_MTAGSYND	0x00000000000f0000UL
+#define SCHIZO_CEAFSR_MTAG	0x000000000000e000UL
+#define SCHIZO_CEAFSR_ECCSYND	0x00000000000001ffUL
+
+/* Correctable ECC error interrupt handler: latch and clear the CE
+ * AFSR/AFAR state and log it.  Returns IRQ_NONE when no CE bits were
+ * set (shared IRQ).  Uses the SCHIZO_CEAFSR_* constants throughout;
+ * the bit layout matches the UE AFSR but the CE family is what this
+ * handler is documented against.
+ */
+static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR;
+	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported, limit;
+
+	/* Latch error status. */
+	afar = schizo_read(afar_reg);
+
+	/* If either of the error pending bits are set in the
+	 * AFSR, the error status is being actively updated by
+	 * the hardware and we must re-read to get a clean value.
+	 */
+	limit = 1000;
+	do {
+		afsr = schizo_read(afsr_reg);
+	} while ((afsr & SCHIZO_CEAFSR_ERRPNDG) != 0 && --limit);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
+		 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
+	if (!error_bits)
+		return IRQ_NONE;
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PCI%d: Correctable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SCHIZO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SCHIZO_CEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & SCHIZO_CEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_CEAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_CEAFSR_QOFF) >> 30UL,
+	       (afsr & SCHIZO_CEAFSR_AID) >> 24UL);
+	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_CEAFSR_PARTIAL) ? 1 : 0,
+	       (afsr & SCHIZO_CEAFSR_OWNEDIN) ? 1 : 0,
+	       (afsr & SCHIZO_CEAFSR_MTAG) >> 13UL,
+	       (afsr & SCHIZO_CEAFSR_MTAGSYND) >> 16UL,
+	       (afsr & SCHIZO_CEAFSR_ECCSYND) >> 0UL);
+	printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar);
+	printk("PCI%d: CE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SCHIZO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SCHIZO_CEAFSR_SDMA) {
+		reported++;
+		printk("(DMA)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	schizo_clear_other_err_intr(p, irq);
+
+	return IRQ_HANDLED;
+}
+
+#define SCHIZO_PCI_AFSR	0x2010UL
+#define SCHIZO_PCI_AFAR	0x2018UL
+
+#define SCHIZO_PCIAFSR_PMA	0x8000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PTA	0x4000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PRTRY	0x2000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PPERR	0x1000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PTTO	0x0800000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PUNUS	0x0400000000000000UL /* Schizo */
+#define SCHIZO_PCIAFSR_SMA	0x0200000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_STA	0x0100000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SRTRY	0x0080000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SPERR	0x0040000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_STTO	0x0020000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SUNUS	0x0010000000000000UL /* Schizo */
+#define SCHIZO_PCIAFSR_BMSK	0x000003ff00000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_BLK	0x0000000080000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_CFG	0x0000000040000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_MEM	0x0000000020000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_IO	0x0000000010000000UL /* Schizo/Tomatillo */
+
+#define SCHIZO_PCI_CTRL		(0x2000UL)
+#define SCHIZO_PCICTRL_BUS_UNUS	(1UL << 63UL) /* Safari */
+/* 0x1ff must be UL: shifting a 32-bit int by 52 is undefined behavior. */
+#define SCHIZO_PCICTRL_ARB_PRIO (0x1ffUL << 52UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_ESLCK	(1UL << 51UL) /* Safari */
+#define SCHIZO_PCICTRL_ERRSLOT	(7UL << 48UL) /* Safari */
+#define SCHIZO_PCICTRL_TTO_ERR	(1UL << 38UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_RTRY_ERR	(1UL << 37UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_DTO_ERR	(1UL << 36UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_SBH_ERR	(1UL << 35UL) /* Safari */
+#define SCHIZO_PCICTRL_SERR	(1UL << 34UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PCISPD	(1UL << 33UL) /* Safari */
+#define SCHIZO_PCICTRL_MRM_PREF	(1UL << 30UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_RDO_PREF	(1UL << 29UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_RDL_PREF	(1UL << 28UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_PTO	(3UL << 24UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PTO_SHIFT 24UL
+#define SCHIZO_PCICTRL_TRWSW	(7UL << 21UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_F_TGT_A	(1UL << 20UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
+#define SCHIZO_PCICTRL_F_TGT_RT	(1UL << 19UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_SBH_INT	(1UL << 18UL) /* Safari */
+#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_EEN	(1UL << 17UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PARK	(1UL << 16UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PCIRST	(1UL <<  8UL) /* Safari */
+#define SCHIZO_PCICTRL_ARB_S	(0x3fUL << 0UL) /* Safari */
+#define SCHIZO_PCICTRL_ARB_T	(0xffUL << 0UL) /* Tomatillo */
+
+/* Handle PCI error conditions not reported through the PCI AFSR:
+ * the PBM control register's latched error bits and the bridge's own
+ * PCI status register.  Returns IRQ_HANDLED if anything was found
+ * and cleared, IRQ_NONE otherwise.
+ */
+static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
+{
+	unsigned long csr_reg, csr, csr_error_bits;
+	irqreturn_t ret = IRQ_NONE;
+	u16 stat;
+
+	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
+	csr = schizo_read(csr_reg);
+	csr_error_bits =
+		csr & (SCHIZO_PCICTRL_BUS_UNUS |
+		       SCHIZO_PCICTRL_TTO_ERR |
+		       SCHIZO_PCICTRL_RTRY_ERR |
+		       SCHIZO_PCICTRL_DTO_ERR |
+		       SCHIZO_PCICTRL_SBH_ERR |
+		       SCHIZO_PCICTRL_SERR);
+	if (csr_error_bits) {
+		/* Clear the errors.  */
+		schizo_write(csr_reg, csr);
+
+		/* Log 'em.  */
+		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
+			printk("%s: Bus unusable error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
+			printk("%s: PCI TRDY# timeout error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
+			printk("%s: PCI excessive retry error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
+			printk("%s: PCI discard timeout error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
+			printk("%s: PCI streaming byte hole error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
+			printk("%s: PCI SERR signal asserted.\n",
+			       pbm->name);
+		ret = IRQ_HANDLED;
+	}
+	/* Check the bridge's own config-space status and clear it
+	 * (status bits are write-one-to-clear, hence 0xffff).
+	 */
+	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+	if (stat & (PCI_STATUS_PARITY |
+		    PCI_STATUS_SIG_TARGET_ABORT |
+		    PCI_STATUS_REC_TARGET_ABORT |
+		    PCI_STATUS_REC_MASTER_ABORT |
+		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
+		       pbm->name, stat);
+		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+/* Per-PBM PCI bus error interrupt handler: latch and clear the PCI
+ * AFSR/AFAR, log the primary and secondary error causes, then scan
+ * the bus for devices that logged matching errors.  Falls through to
+ * schizo_pcierr_intr_other() when no AFSR bits are set.
+ */
+static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_pbm_info *pbm = dev_id;
+	struct pci_controller_info *p = pbm->parent;
+	unsigned long afsr_reg, afar_reg, base;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	base = pbm->pbm_regs;
+
+	afsr_reg = base + SCHIZO_PCI_AFSR;
+	afar_reg = base + SCHIZO_PCI_AFAR;
+
+	/* Latch error status. */
+	afar = schizo_read(afar_reg);
+	afsr = schizo_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
+	if (!error_bits)
+		return schizo_pcierr_intr_other(pbm);
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("%s: PCI Error, primary error type[%s]\n",
+	       pbm->name,
+	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
+		 "Master Abort" :
+		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
+		  "Target Abort" :
+		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
+		   "Excessive Retries" :
+		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
+		    "Parity Error" :
+		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
+		     "Timeout" :
+		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
+		      "Bus Unusable" : "???"))))))));
+	printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
+	       pbm->name,
+	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
+	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
+		"Config" :
+		((afsr & SCHIZO_PCIAFSR_MEM) ?
+		 "Memory" :
+		 ((afsr & SCHIZO_PCIAFSR_IO) ?
+		  "I/O" : "???"))));
+	printk("%s: PCI AFAR [%016lx]\n",
+	       pbm->name, afar);
+	printk("%s: PCI Secondary errors [",
+	       pbm->name);
+	reported = 0;
+	if (afsr & SCHIZO_PCIAFSR_SMA) {
+		reported++;
+		printk("(Master Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STA) {
+		reported++;
+		printk("(Target Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
+		reported++;
+		printk("(Excessive Retries)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SPERR) {
+		reported++;
+		printk("(Parity Error)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STTO) {
+		reported++;
+		printk("(Timeout)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
+		reported++;
+		printk("(Bus Unusable)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* For the error types shown, scan PBM's PCI bus for devices
+	 * which have logged that error type.
+	 */
+
+	/* If we see a Target Abort, this could be the result of an
+	 * IOMMU translation error of some sort.  It is extremely
+	 * useful to log this information as usually it indicates
+	 * a bug in the IOMMU support code or a PCI device driver.
+	 */
+	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
+		schizo_check_iommu_error(p, PCI_ERR);
+		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
+	}
+	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
+		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);
+
+	/* For excessive retries, PSYCHO/PBM will abort the device
+	 * and there is no way to specifically check for excessive
+	 * retries in the config space status registers.  So what
+	 * we hope is that we'll catch it via the master/target
+	 * abort events.
+	 */
+
+	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
+		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
+
+	schizo_clear_other_err_intr(p, irq);
+
+	return IRQ_HANDLED;
+}
+
+/* Safari/JBUS error-log register and its error bits.  Note that some
+ * bit positions are reused with different meanings between Safari
+ * (Schizo) and JBUS (Tomatillo) variants.
+ */
+#define SCHIZO_SAFARI_ERRLOG	0x10018UL
+
+#define SAFARI_ERRLOG_ERROUT	0x8000000000000000UL
+
+#define BUS_ERROR_BADCMD	0x4000000000000000UL /* Schizo/Tomatillo */
+#define BUS_ERROR_SSMDIS	0x2000000000000000UL /* Safari */
+#define BUS_ERROR_BADMA		0x1000000000000000UL /* Safari */
+#define BUS_ERROR_BADMB		0x0800000000000000UL /* Safari */
+#define BUS_ERROR_BADMC		0x0400000000000000UL /* Safari */
+#define BUS_ERROR_SNOOP_GR	0x0000000000200000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_PCI	0x0000000000100000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RD	0x0000000000080000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDS	0x0000000000020000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDSA	0x0000000000010000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_OWN	0x0000000000008000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDO	0x0000000000004000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PS	0x0000000000002000UL /* Safari */
+#define BUS_ERROR_WDATA_PERR	0x0000000000002000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PB	0x0000000000001000UL /* Safari */
+#define BUS_ERROR_CTRL_PERR	0x0000000000001000UL /* Tomatillo */
+#define BUS_ERROR_CPU0PS	0x0000000000000800UL /* Safari */
+#define BUS_ERROR_SNOOP_ERR	0x0000000000000800UL /* Tomatillo */
+#define BUS_ERROR_CPU0PB	0x0000000000000400UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_B	0x0000000000000400UL /* Tomatillo */
+#define BUS_ERROR_CIQTO		0x0000000000000200UL /* Safari */
+#define BUS_ERROR_LPQTO		0x0000000000000100UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_C	0x0000000000000100UL /* Tomatillo */
+#define BUS_ERROR_SFPQTO	0x0000000000000080UL /* Safari */
+#define BUS_ERROR_UFPQTO	0x0000000000000040UL /* Safari */
+#define BUS_ERROR_RD_PERR	0x0000000000000040UL /* Tomatillo */
+#define BUS_ERROR_APERR		0x0000000000000020UL /* Safari/Tomatillo */
+#define BUS_ERROR_UNMAP		0x0000000000000010UL /* Safari/Tomatillo */
+#define BUS_ERROR_BUSERR	0x0000000000000004UL /* Safari/Tomatillo */
+#define BUS_ERROR_TIMEOUT	0x0000000000000002UL /* Safari/Tomatillo */
+#define BUS_ERROR_ILL		0x0000000000000001UL /* Safari */
+
+/* We only expect UNMAP errors here.  The rest of the Safari errors
+ * are marked fatal and thus cause a system reset.
+ *
+ * Clears ERROUT in the error-log register, then either logs an
+ * unexpected error or (for UNMAP) interrogates both IOMMUs.
+ */
+static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	u64 errlog;
+
+	errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG);
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG,
+		     errlog & ~(SAFARI_ERRLOG_ERROUT));
+
+	if (!(errlog & BUS_ERROR_UNMAP)) {
+		printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
+		       p->index, errlog);
+
+		schizo_clear_other_err_intr(p, irq);
+		return IRQ_HANDLED;
+	}
+
+	printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
+	       p->index);
+	schizo_check_iommu_error(p, SAFARI_ERR);
+
+	schizo_clear_other_err_intr(p, irq);
+	return IRQ_HANDLED;
+}
+
+/* Nearly identical to PSYCHO equivalents... */
+#define SCHIZO_ECC_CTRL		0x10020UL
+#define  SCHIZO_ECCCTRL_EE	 0x8000000000000000UL /* Enable ECC Checking */
+#define  SCHIZO_ECCCTRL_UE	 0x4000000000000000UL /* Enable UE Interrupts */
+#define  SCHIZO_ECCCTRL_CE	 0x2000000000000000UL /* Enable CE Interrupts */
+
+#define SCHIZO_SAFARI_ERRCTRL	0x10008UL
+#define  SCHIZO_SAFERRCTRL_EN	 0x8000000000000000UL
+#define SCHIZO_SAFARI_IRQCTRL	0x10010UL
+#define  SCHIZO_SAFIRQCTRL_EN	 0x8000000000000000UL
+
+/* How the Tomatillo IRQs are routed around is pure guesswork here.
+ *
+ * All the Tomatillo devices I see in prtconf dumps seem to have only
+ * a single PCI bus unit attached to it.  It would seem they are separate
+ * devices because their PortID (ie. JBUS ID) values are all different
+ * and thus the registers are mapped to totally different locations.
+ *
+ * However, two Tomatillo's look "similar" in that the only difference
+ * in their PortID is the lowest bit.
+ *
+ * So if we were to ignore this lower bit, it certainly looks like two
+ * PCI bus units of the same Tomatillo.  I still have not really
+ * figured this out...
+ */
+/* Register UE/CE/PCIERR/SERR handlers for a Tomatillo controller and
+ * enable the corresponding error-reporting bits in the ECC, PCI
+ * control, PCI AFSR, Safari error-control and IRQ-control registers.
+ *
+ * After each request_irq(), the IMAP contents are read back and
+ * written to the register at pbm_regs + imap_offset + 4 -- the +4
+ * presumably targets the low 32 bits of the 64-bit register for the
+ * 32-bit upa accessors; confirm against the accessor definitions.
+ */
+static void __init tomatillo_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm;
+	unsigned int irq;
+	struct ino_bucket *bucket;
+	u64 tmp, err_mask, err_no_mask;
+
+	/* Build IRQs and register handlers. */
+	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
+	if (request_irq(irq, schizo_ue_intr,
+			SA_SHIRQ, "TOMATILLO UE", p) < 0) {
+		prom_printf("%s: Cannot register UE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_UE_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
+	if (request_irq(irq, schizo_ce_intr,
+			SA_SHIRQ, "TOMATILLO CE", p) < 0) {
+		prom_printf("%s: Cannot register CE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_CE_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
+	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
+					   SCHIZO_PCIERR_A_INO));
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
+		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
+	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
+					    SCHIZO_PCIERR_B_INO));
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
+		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
+	if (request_irq(irq, schizo_safarierr_intr,
+			SA_SHIRQ, "TOMATILLO SERR", p) < 0) {
+		prom_printf("%s: Cannot register SafariERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));
+
+	/* Enable UE and CE interrupts for controller. */
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits.
+	 */
+	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
+		    SCHIZO_PCICTRL_TTO_ERR |
+		    SCHIZO_PCICTRL_RTRY_ERR |
+		    SCHIZO_PCICTRL_SERR |
+		    SCHIZO_PCICTRL_EEN);
+
+	/* Discard-timeout reporting is deliberately left disabled. */
+	err_no_mask = SCHIZO_PCICTRL_DTO_ERR;
+
+	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	/* Clear any stale AFSR error bits (write-one-to-clear). */
+	err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		    SCHIZO_PCIAFSR_PTTO |
+		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		    SCHIZO_PCIAFSR_STTO);
+
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
+
+	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
+		    BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
+		    BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
+		    BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
+		    BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
+		    BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
+		    BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
+		    BUS_ERROR_APERR | BUS_ERROR_UNMAP |
+		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);
+
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN | err_mask));
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN | err_mask));
+
+	/* Only UNMAP generates an interrupt; the rest are fatal. */
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+}
+
+/* Register and enable all error interrupts for one SCHIZO/SCHIZO+
+ * controller: uncorrectable (UE) and correctable (CE) ECC errors,
+ * per-PBM PCI errors, and Safari bus errors.  Any registration
+ * failure is fatal (prom_halt).  After the handlers are installed,
+ * the controller's error detection bits are enabled and stale
+ * error status is cleared.
+ */
+static void __init schizo_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm;
+	unsigned int irq;
+	struct ino_bucket *bucket;
+	u64 tmp, err_mask, err_no_mask;
+
+	/* Build IRQs and register handlers. */
+	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
+	if (request_irq(irq, schizo_ue_intr,
+			SA_SHIRQ, "SCHIZO UE", p) < 0) {
+		prom_printf("%s: Cannot register UE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	/* Copy the interrupt's IMAP register value into the PBM
+	 * register at (imap offset + 4).  NOTE(review): this write,
+	 * repeated after every registration below, is not explained
+	 * here -- confirm its purpose against the Schizo interrupt
+	 * mapping documentation.
+	 */
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
+	if (request_irq(irq, schizo_ce_intr,
+			SA_SHIRQ, "SCHIZO CE", p) < 0) {
+		prom_printf("%s: Cannot register CE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO);
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) {
+		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO);
+	/* NOTE(review): PBM A above passes "pbm" as the handler
+	 * cookie while PBM B passes "&p->pbm_B".  These should be
+	 * the same pointer if the ino-bitmap routes PCIERR_B_INO
+	 * to PBM B, but the asymmetry is worth confirming.
+	 */
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
+		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
+	if (request_irq(irq, schizo_safarierr_intr,
+			SA_SHIRQ, "SCHIZO SERR", p) < 0) {
+		prom_printf("%s: Cannot register SafariERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
+
+	/* Enable UE and CE interrupts for controller. */
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
+		    SCHIZO_PCICTRL_ESLCK |
+		    SCHIZO_PCICTRL_TTO_ERR |
+		    SCHIZO_PCICTRL_RTRY_ERR |
+		    SCHIZO_PCICTRL_SBH_ERR |
+		    SCHIZO_PCICTRL_SERR |
+		    SCHIZO_PCICTRL_EEN);
+
+	err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
+		       SCHIZO_PCICTRL_SBH_INT);
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits for each PBM.
+	 */
+	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	/* AFSR bits are write-one-to-clear status; wipe any stale
+	 * primary and secondary error state.
+	 */
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR,
+		     (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		      SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		      SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+		      SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		      SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		      SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
+
+	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR,
+		     (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		      SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		      SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+		      SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		      SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		      SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
+
+	/* Make all Safari error conditions fatal except unmapped
+	 * errors which we make generate interrupts.
+	 */
+	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
+		    BUS_ERROR_BADMA | BUS_ERROR_BADMB |
+		    BUS_ERROR_BADMC |
+		    BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
+		    BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
+		    BUS_ERROR_CIQTO |
+		    BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
+		    BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
+		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
+		    BUS_ERROR_ILL);
+#if 1
+	/* XXX Something wrong with some Excalibur systems
+	 * XXX Sun is shipping.  The behavior on a 2-cpu
+	 * XXX machine is that both CPU1 parity error bits
+	 * XXX are set and are immediately set again when
+	 * XXX their error status bits are cleared.  Just
+	 * XXX ignore them for now.  -DaveM
+	 */
+	err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
+		      BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
+#endif
+
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN | err_mask));
+
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+}
+
+/* Program the PBM's own PCI config-space header at (first_busno,
+ * devfn 0): a 64-byte cache line size (expressed in 32-bit words,
+ * per the PCI spec) and a latency timer of 64 PCI clocks.
+ */
+static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
+{
+	u8 *addr;
+
+	/* Set cache-line size to 64 bytes, this is actually
+	 * a nop but I do it for completeness.
+	 */
+	addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_CACHE_LINE_SIZE);
+	pci_config_write8(addr, 64 / sizeof(u32));
+
+	/* Set PBM latency timer to 64 PCI clocks. */
+	addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_LATENCY_TIMER);
+	pci_config_write8(addr, 64);
+}
+
+/* Scan one PBM's PCI bus and perform the sparc64 fixups: attach a
+ * pcidev_cookie to the host bridge device, fill in per-device
+ * cookies from the PROM tree, record and assign resources, wire up
+ * IRQs, and configure 66MHz/bus-mastering state.  The cookie
+ * allocation failure is fatal (prom_halt); the cookie is owned by
+ * the bridge's sysdata from then on.
+ */
+static void __init pbm_scan_bus(struct pci_controller_info *p,
+				struct pci_pbm_info *pbm)
+{
+	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+
+	if (!cookie) {
+		prom_printf("%s: Critical allocation failure.\n", pbm->name);
+		prom_halt();
+	}
+
+	/* All we care about is the PBM. */
+	memset(cookie, 0, sizeof(*cookie));
+	cookie->pbm = pbm;
+
+	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
+				    p->pci_ops,
+				    pbm);
+	pci_fixup_host_bridge_self(pbm->pci_bus);
+	pbm->pci_bus->self->sysdata = cookie;
+
+	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
+	pci_record_assignments(pbm, pbm->pci_bus);
+	pci_assign_unassigned(pbm, pbm->pci_bus);
+	pci_fixup_irq(pbm, pbm->pci_bus);
+	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
+	pci_setup_busmastering(pbm, pbm->pci_bus);
+}
+
+/* Common bus-scan entry for Schizo-family controllers.  Requires
+ * both PBMs to have been probed (otherwise the whole controller is
+ * skipped), configures bus mastering and 66MHz capability, scans
+ * PBM B then PBM A, and finally installs the chip-appropriate
+ * error interrupt handlers.
+ */
+static void __init __schizo_scan_bus(struct pci_controller_info *p,
+				     int chip_type)
+{
+	if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
+		printk("PCI: Only one PCI bus module of controller found.\n");
+		printk("PCI: Ignoring entire controller.\n");
+		return;
+	}
+
+	pbm_config_busmastering(&p->pbm_B);
+	p->pbm_B.is_66mhz_capable =
+		prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
+	pbm_config_busmastering(&p->pbm_A);
+	p->pbm_A.is_66mhz_capable =
+		prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
+	pbm_scan_bus(p, &p->pbm_B);
+	pbm_scan_bus(p, &p->pbm_A);
+
+	/* After the PCI bus scan is complete, we can register
+	 * the error interrupt handlers.
+	 */
+	if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+		tomatillo_register_error_handlers(p);
+	else
+		schizo_register_error_handlers(p);
+}
+
+/* scan_bus callback for SCHIZO/SCHIZO+ controllers. */
+static void __init schizo_scan_bus(struct pci_controller_info *p)
+{
+	__schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
+}
+
+/* scan_bus callback for TOMATILLO controllers. */
+static void __init tomatillo_scan_bus(struct pci_controller_info *p)
+{
+	__schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
+}
+
+/* Write a device's assigned resource back into the corresponding
+ * PCI BAR (resources 0-5) or the expansion ROM register.  The
+ * value programmed is relative to the PBM's I/O or MEM space base.
+ * Non-standard resource indices are silently ignored.
+ */
+static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res, *root;
+	u32 reg;
+	int where, size, is_64bit;
+
+	res = &pdev->resource[resource];
+	if (resource < 6) {
+		where = PCI_BASE_ADDRESS_0 + (resource * 4);
+	} else if (resource == PCI_ROM_RESOURCE) {
+		where = pdev->rom_base_reg;
+	} else {
+		/* Somebody might have asked allocation of a non-standard resource */
+		return;
+	}
+
+	is_64bit = 0;
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else {
+		root = &pbm->mem_space;
+		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			is_64bit = 1;
+	}
+
+	/* res->end is inclusive, so "size" is (length - 1); it acts
+	 * as a mask: the BAR's low read-only/flag bits are kept via
+	 * (reg & size) while the address bits above it are replaced
+	 * with the PBM-relative start.
+	 */
+	size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+	reg = ((reg & size) |
+	       (((u32)(res->start - root->start)) & ~size));
+	if (resource == PCI_ROM_RESOURCE) {
+		reg |= PCI_ROM_ADDRESS_ENABLE;
+		res->flags |= IORESOURCE_ROM_ENABLE;
+	}
+	pci_write_config_dword(pdev, where, reg);
+
+	/* This knows that the upper 32-bits of the address
+	 * must be zero.  Our PCI common layer enforces this.
+	 */
+	if (is_64bit)
+		pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* Translate a device resource from PBM-relative to system-physical
+ * coordinates by offsetting it with the root resource's base.
+ */
+static void __init schizo_resource_adjust(struct pci_dev *pdev,
+					  struct resource *res,
+					  struct resource *root)
+{
+	unsigned long base = root->start;
+
+	res->start += base;
+	res->end += base;
+}
+
+/* Use ranges property to determine where PCI MEM, I/O, and Config
+ * space are for this PCI bus module.  The child address space type
+ * is encoded in bits 24-25 of child_phys_hi; the parent (system)
+ * address is the 64-bit combination of parent_phys_hi/lo.  Missing
+ * any of the three required ranges is fatal (prom_halt).
+ */
+static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
+{
+	int i, saw_cfg, saw_mem, saw_io;
+
+	saw_cfg = saw_mem = saw_io = 0;
+	for (i = 0; i < pbm->num_pbm_ranges; i++) {
+		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
+		unsigned long a;
+		int type;
+
+		type = (pr->child_phys_hi >> 24) & 0x3;
+		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
+		     ((unsigned long)pr->parent_phys_lo  <<  0UL));
+
+		switch (type) {
+		case 0:
+			/* PCI config space, 16MB */
+			pbm->config_space = a;
+			saw_cfg = 1;
+			break;
+
+		case 1:
+			/* 16-bit IO space, 16MB */
+			pbm->io_space.start = a;
+			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
+			pbm->io_space.flags = IORESOURCE_IO;
+			saw_io = 1;
+			break;
+
+		case 2:
+			/* 32-bit MEM space, 2GB */
+			pbm->mem_space.start = a;
+			pbm->mem_space.end = a + (0x80000000UL - 1UL);
+			pbm->mem_space.flags = IORESOURCE_MEM;
+			saw_mem = 1;
+			break;
+
+		default:
+			/* 64-bit MEM and other range types are ignored. */
+			break;
+		};
+	}
+
+	if (!saw_cfg || !saw_io || !saw_mem) {
+		prom_printf("%s: Fatal error, missing %s PBM range.\n",
+			    pbm->name,
+			    ((!saw_cfg ?
+			      "CFG" :
+			      (!saw_io ?
+			       "IO" : "MEM"))));
+		prom_halt();
+	}
+
+	printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
+	       pbm->name,
+	       pbm->config_space,
+	       pbm->io_space.start,
+	       pbm->mem_space.start);
+}
+
+/* Insert the PBM's I/O and MEM apertures into the global resource
+ * trees and carve out the legacy (VGA/ISA) regions.
+ * NOTE(review): the request_resource() return values are ignored;
+ * a conflict would go unreported here.
+ */
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+						   struct pci_pbm_info *pbm)
+{
+	pbm->io_space.name = pbm->mem_space.name = pbm->name;
+
+	request_resource(&ioport_resource, &pbm->io_space);
+	request_resource(&iomem_resource, &pbm->mem_space);
+	pci_register_legacy_regions(&pbm->io_space,
+				    &pbm->mem_space);
+}
+
+#define SCHIZO_STRBUF_CONTROL		(0x02800UL)
+#define SCHIZO_STRBUF_FLUSH		(0x02808UL)
+#define SCHIZO_STRBUF_FSYNC		(0x02810UL)
+#define SCHIZO_STRBUF_CTXFLUSH		(0x02818UL)
+#define SCHIZO_STRBUF_CTXMATCH		(0x10000UL)
+
+/* Initialize the PBM's streaming cache (STC) state: record the
+ * register addresses, set up a 64-byte-aligned flush-flag buffer,
+ * and enable the streaming buffer with LRU locking and diag mode
+ * turned off.  Tomatillo has no streaming cache, so it is skipped.
+ */
+static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
+{
+	unsigned long base = pbm->pbm_regs;
+	u64 control;
+
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+		/* TOMATILLO lacks streaming cache.  */
+		return;
+	}
+
+	/* SCHIZO has context flushing. */
+	pbm->stc.strbuf_control		= base + SCHIZO_STRBUF_CONTROL;
+	pbm->stc.strbuf_pflush		= base + SCHIZO_STRBUF_FLUSH;
+	pbm->stc.strbuf_fsync		= base + SCHIZO_STRBUF_FSYNC;
+	pbm->stc.strbuf_ctxflush	= base + SCHIZO_STRBUF_CTXFLUSH;
+	pbm->stc.strbuf_ctxmatch_base	= base + SCHIZO_STRBUF_CTXMATCH;
+
+	/* Round the embedded flush-flag buffer up to the next
+	 * 64-byte boundary and record both its virtual and
+	 * physical addresses.
+	 */
+	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
+		((((unsigned long)&pbm->stc.__flushflag_buf[0])
+		  + 63UL)
+		 & ~63UL);
+	pbm->stc.strbuf_flushflag_pa = (unsigned long)
+		__pa(pbm->stc.strbuf_flushflag);
+
+	/* Turn off LRU locking and diag mode, enable the
+	 * streaming buffer and leave the rerun-disable
+	 * setting however OBP set it.
+	 */
+	control = schizo_read(pbm->stc.strbuf_control);
+	control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
+		     SCHIZO_STRBUF_CTRL_LENAB |
+		     SCHIZO_STRBUF_CTRL_DENAB);
+	control |= SCHIZO_STRBUF_CTRL_ENAB;
+	schizo_write(pbm->stc.strbuf_control, control);
+
+	pbm->stc.strbuf_enabled = 1;
+}
+
+#define SCHIZO_IOMMU_CONTROL		(0x00200UL)
+#define SCHIZO_IOMMU_TSBBASE		(0x00208UL)
+#define SCHIZO_IOMMU_FLUSH		(0x00210UL)
+#define SCHIZO_IOMMU_CTXFLUSH		(0x00218UL)
+
+/* Initialize the PBM's IOMMU: size the DVMA window from the
+ * "virtual-dma" PROM property (defaulting to a 1GB window at
+ * 0xc0000000), invalidate the IOTLB, allocate and install the
+ * TSB (page table), and enable translation.  All allocation or
+ * configuration failures are fatal (prom_halt).
+ */
+static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
+{
+	struct pci_iommu *iommu = pbm->iommu;
+	unsigned long tsbbase, i, tagbase, database, order;
+	u32 vdma[2], dma_mask;
+	u64 control;
+	int err, tsbsize;
+
+	err = prom_getproperty(pbm->prom_node, "virtual-dma",
+			       (char *)&vdma[0], sizeof(vdma));
+	if (err == 0 || err == -1) {
+		/* No property, use default values. */
+		vdma[0] = 0xc0000000;
+		vdma[1] = 0x40000000;
+	}
+
+	/* vdma[0] is the DVMA window base, vdma[1] its size; the
+	 * window size determines both the DMA address mask and the
+	 * TSB size in K-entries.
+	 */
+	dma_mask = vdma[0];
+	switch (vdma[1]) {
+		case 0x20000000:
+			dma_mask |= 0x1fffffff;
+			tsbsize = 64;
+			break;
+
+		case 0x40000000:
+			dma_mask |= 0x3fffffff;
+			tsbsize = 128;
+			break;
+
+		case 0x80000000:
+			dma_mask |= 0x7fffffff;
+			tsbsize = 128;
+			break;
+
+		default:
+			prom_printf("SCHIZO: strange virtual-dma size.\n");
+			prom_halt();
+	};
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->iommu_cur_ctx = 0;
+
+	/* Register addresses, SCHIZO has iommu ctx flushing. */
+	iommu->iommu_control  = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
+	iommu->iommu_tsbbase  = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
+	iommu->iommu_flush    = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
+	iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
+
+	/* We use the main control/status register of SCHIZO as the write
+	 * completion register.
+	 */
+	iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
+
+	/*
+	 * Invalidate TLB Entries.
+	 */
+	control = schizo_read(iommu->iommu_control);
+	control |= SCHIZO_IOMMU_CTRL_DENAB;
+	schizo_write(iommu->iommu_control, control);
+
+	tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
+
+	/* With diag mode enabled, zeroing the 16 tag/data pairs
+	 * wipes the IOTLB.
+	 */
+	for(i = 0; i < 16; i++) {
+		schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0);
+		schizo_write(pbm->pbm_regs + database + (i * 8UL), 0);
+	}
+
+	/* Leave diag mode enabled for full-flushing done
+	 * in pci_iommu.c
+	 */
+
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
+	 * table (128K ioptes * 8 bytes per iopte).  This is
+	 * page order 7 on UltraSparc.
+	 * (tsbsize may also be 64, halving the table.)
+	 */
+	order = get_order(tsbsize * 8 * 1024);
+	tsbbase = __get_free_pages(GFP_KERNEL, order);
+	if (!tsbbase) {
+		prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
+		prom_halt();
+	}
+
+	iommu->page_table = (iopte_t *)tsbbase;
+	iommu->page_table_map_base = vdma[0];
+	iommu->dma_addr_mask = dma_mask;
+	pci_iommu_table_init(iommu, PAGE_SIZE << order);
+
+	switch (tsbsize) {
+	case 64:
+		iommu->page_table_sz_bits = 16;
+		break;
+
+	case 128:
+		iommu->page_table_sz_bits = 17;
+		break;
+
+	default:
+		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
+		prom_halt();
+		break;
+	};
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map =
+		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+	for (i = 0; i < PBM_NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+
+	/* Point the hardware at the TSB and enable translation
+	 * with the matching TSB size encoding.
+	 */
+	schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+
+	control = schizo_read(iommu->iommu_control);
+	control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
+	switch (tsbsize) {
+	case 64:
+		control |= SCHIZO_IOMMU_TSBSZ_64K;
+		break;
+	case 128:
+		control |= SCHIZO_IOMMU_TSBSZ_128K;
+		break;
+	};
+
+	control |= SCHIZO_IOMMU_CTRL_ENAB;
+	schizo_write(iommu->iommu_control, control);
+}
+
+#define SCHIZO_PCI_IRQ_RETRY	(0x1a00UL)
+#define  SCHIZO_IRQ_RETRY_INF	 0xffUL
+
+#define SCHIZO_PCI_DIAG			(0x2020UL)
+#define  SCHIZO_PCIDIAG_D_BADECC	(1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
+#define  SCHIZO_PCIDIAG_D_BYPASS	(1UL <<  9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_D_TTO		(1UL <<  8UL) /* Disable TTO errors (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_D_RTRYARB	(1UL <<  7UL) /* Disable retry arbitration (Schizo) */
+#define  SCHIZO_PCIDIAG_D_RETRY		(1UL <<  6UL) /* Disable retry limit (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_D_INTSYNC	(1UL <<  5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_I_DMA_PARITY	(1UL <<  3UL) /* Invert DMA parity (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_I_PIOD_PARITY	(1UL <<  2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
+#define  SCHIZO_PCIDIAG_I_PIOA_PARITY	(1UL <<  1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
+
+#define TOMATILLO_PCI_IOC_CSR		(0x2248UL)
+#define TOMATILLO_IOC_PART_WPENAB	0x0000000000080000UL
+#define TOMATILLO_IOC_RDMULT_PENAB	0x0000000000040000UL
+#define TOMATILLO_IOC_RDONE_PENAB	0x0000000000020000UL
+#define TOMATILLO_IOC_RDLINE_PENAB	0x0000000000010000UL
+#define TOMATILLO_IOC_RDMULT_PLEN	0x000000000000c000UL
+#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT	14UL
+#define TOMATILLO_IOC_RDONE_PLEN	0x0000000000003000UL
+#define TOMATILLO_IOC_RDONE_PLEN_SHIFT	12UL
+#define TOMATILLO_IOC_RDLINE_PLEN	0x0000000000000c00UL
+#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT	10UL
+#define TOMATILLO_IOC_PREF_OFF		0x00000000000003f8UL
+#define TOMATILLO_IOC_PREF_OFF_SHIFT	3UL
+#define TOMATILLO_IOC_RDMULT_CPENAB	0x0000000000000004UL
+#define TOMATILLO_IOC_RDONE_CPENAB	0x0000000000000002UL
+#define TOMATILLO_IOC_RDLINE_CPENAB	0x0000000000000001UL
+
+#define TOMATILLO_PCI_IOC_TDIAG		(0x2250UL)
+#define TOMATILLO_PCI_IOC_DDIAG		(0x2290UL)
+
+/* Chip-level PBM setup performed before the PCI bus scan: infinite
+ * IRQ retry, arbiter enables, PCI timeout programming, bus parking,
+ * retry-limit/diag tweaks, and (on Tomatillo) prefetch workarounds.
+ * Several settings are keyed off chip type/version to work around
+ * known silicon errata.
+ */
+static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
+{
+	u64 tmp;
+
+	/* Set IRQ retry to infinity. */
+	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
+		     SCHIZO_IRQ_RETRY_INF);
+
+	/* Enable arbiter for all PCI slots.  Also, disable PCI interval
+	 * timer so that DTO (Discard TimeOuts) are not reported because
+	 * some Schizo revisions report them erroneously.
+	 */
+	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
+	if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
+	    pbm->chip_version == 0x5 &&
+	    pbm->chip_revision == 0x1)
+		tmp |= 0x0f;
+	else
+		tmp |= 0xff;
+
+	tmp &= ~SCHIZO_PCICTRL_PTO;
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
+	    pbm->chip_version >= 0x2)
+		tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
+	else
+		tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;
+
+	if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
+		tmp |= SCHIZO_PCICTRL_PARK;
+
+	/* NOTE(review): bit 61's meaning is not named here; it is
+	 * set only on early (version <= 1) Tomatillo -- confirm
+	 * against the Tomatillo documentation.
+	 */
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
+	    pbm->chip_version <= 0x1)
+		tmp |= (1UL << 61);
+	else
+		tmp &= ~(1UL << 61);
+
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
+		tmp |= (SCHIZO_PCICTRL_MRM_PREF |
+			SCHIZO_PCICTRL_RDO_PREF |
+			SCHIZO_PCICTRL_RDL_PREF);
+
+	schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	/* Turn off retry arbitration, the retry limit, and
+	 * interrupt/DMA synchronization diag overrides.
+	 */
+	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG);
+	tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
+		 SCHIZO_PCIDIAG_D_RETRY |
+		 SCHIZO_PCIDIAG_D_INTSYNC);
+	schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp);
+
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+		/* Clear prefetch lengths to workaround a bug in
+		 * Jalapeno...
+		 */
+		tmp = (TOMATILLO_IOC_PART_WPENAB |
+		       (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
+		       TOMATILLO_IOC_RDMULT_CPENAB |
+		       TOMATILLO_IOC_RDONE_CPENAB |
+		       TOMATILLO_IOC_RDLINE_CPENAB);
+
+		schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR,
+			     tmp);
+	}
+}
+
+/* Probe and initialize one PBM (PCI bus module) from its OBP node:
+ * decode the "reg" property to find the register banks, decide
+ * whether this is PBM A or B from the physical address, pull the
+ * ranges/interrupt-map/ino-bitmap/bus-range properties, and set up
+ * address spaces, IOMMU, and streaming cache.  Missing mandatory
+ * properties are fatal (prom_halt).
+ */
+static void __init schizo_pbm_init(struct pci_controller_info *p,
+				   int prom_node, u32 portid,
+				   int chip_type)
+{
+	struct linux_prom64_registers pr_regs[4];
+	unsigned int busrange[2];
+	struct pci_pbm_info *pbm;
+	const char *chipset_name;
+	u32 ino_bitmap[2];
+	int is_pbm_a;
+	int err;
+
+	switch (chip_type) {
+	case PBM_CHIP_TYPE_TOMATILLO:
+		chipset_name = "TOMATILLO";
+		break;
+
+	case PBM_CHIP_TYPE_SCHIZO_PLUS:
+		chipset_name = "SCHIZO+";
+		break;
+
+	case PBM_CHIP_TYPE_SCHIZO:
+	default:
+		chipset_name = "SCHIZO";
+		break;
+	};
+
+	/* For SCHIZO, three OBP regs:
+	 * 1) PBM controller regs
+	 * 2) Schizo front-end controller regs (same for both PBMs)
+	 * 3) PBM PCI config space
+	 *
+	 * For TOMATILLO, four OBP regs:
+	 * 1) PBM controller regs
+	 * 2) Tomatillo front-end controller regs
+	 * 3) PBM PCI config space
+	 * 4) Ichip regs
+	 */
+	err = prom_getproperty(prom_node, "reg",
+			       (char *)&pr_regs[0],
+			       sizeof(pr_regs));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no reg property.\n",
+			    chipset_name);
+		prom_halt();
+	}
+
+	/* PBM A vs B is distinguished by the register bank's
+	 * physical address bits 20-22.
+	 */
+	is_pbm_a = ((pr_regs[0].phys_addr & 0x00700000) == 0x00600000);
+
+	if (is_pbm_a)
+		pbm = &p->pbm_A;
+	else
+		pbm = &p->pbm_B;
+
+	pbm->portid = portid;
+	pbm->parent = p;
+	pbm->prom_node = prom_node;
+	pbm->pci_first_slot = 1;
+
+	pbm->chip_type = chip_type;
+	pbm->chip_version =
+		prom_getintdefault(prom_node, "version#", 0);
+	pbm->chip_revision =
+		prom_getintdefault(prom_node, "module-revision#", 0);
+
+	/* The shared front-end controller registers live 64KB
+	 * below the address OBP reports in reg[1].
+	 */
+	pbm->pbm_regs = pr_regs[0].phys_addr;
+	pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
+
+	sprintf(pbm->name,
+		(chip_type == PBM_CHIP_TYPE_TOMATILLO ?
+		 "TOMATILLO%d PBM%c" :
+		 "SCHIZO%d PBM%c"),
+		p->index,
+		(pbm == &p->pbm_A ? 'A' : 'B'));
+
+	printk("%s: ver[%x:%x], portid %x, "
+	       "cregs[%lx] pregs[%lx]\n",
+	       pbm->name,
+	       pbm->chip_version, pbm->chip_revision,
+	       pbm->portid,
+	       pbm->controller_regs,
+	       pbm->pbm_regs);
+
+	schizo_pbm_hw_init(pbm);
+
+	prom_getstring(prom_node, "name",
+		       pbm->prom_name,
+		       sizeof(pbm->prom_name));
+
+	err = prom_getproperty(prom_node, "ranges",
+			       (char *) pbm->pbm_ranges,
+			       sizeof(pbm->pbm_ranges));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no ranges property.\n",
+			    pbm->name);
+		prom_halt();
+	}
+
+	pbm->num_pbm_ranges =
+		(err / sizeof(struct linux_prom_pci_ranges));
+
+	schizo_determine_mem_io_space(pbm);
+	pbm_register_toplevel_resources(p, pbm);
+
+	/* interrupt-map is optional; without it the intmap is
+	 * simply left empty.
+	 */
+	err = prom_getproperty(prom_node, "interrupt-map",
+			       (char *)pbm->pbm_intmap,
+			       sizeof(pbm->pbm_intmap));
+	if (err != -1) {
+		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+		err = prom_getproperty(prom_node, "interrupt-map-mask",
+				       (char *)&pbm->pbm_intmask,
+				       sizeof(pbm->pbm_intmask));
+		if (err == -1) {
+			prom_printf("%s: Fatal error, no "
+				    "interrupt-map-mask.\n", pbm->name);
+			prom_halt();
+		}
+	} else {
+		pbm->num_pbm_intmap = 0;
+		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+	}
+
+	/* ino-bitmap is two 32-bit words combined into one 64-bit
+	 * mask of INOs owned by this PBM.
+	 */
+	err = prom_getproperty(prom_node, "ino-bitmap",
+			       (char *) &ino_bitmap[0],
+			       sizeof(ino_bitmap));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no ino-bitmap.\n", pbm->name);
+		prom_halt();
+	}
+	pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
+			   ((u64)ino_bitmap[0] <<  0UL));
+
+	err = prom_getproperty(prom_node, "bus-range",
+			       (char *)&busrange[0],
+			       sizeof(busrange));
+	if (err == 0 || err == -1) {
+		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
+		prom_halt();
+	}
+	pbm->pci_first_busno = busrange[0];
+	pbm->pci_last_busno = busrange[1];
+
+	schizo_pbm_iommu_init(pbm);
+	schizo_pbm_strbuf_init(pbm);
+}
+
+/* Decide whether two OBP portids belong to the same controller.
+ * On Tomatillo the two PBMs of one controller differ only in the
+ * low bit of the portid; elsewhere the portids match exactly.
+ * Returns 1 on a match, 0 otherwise.
+ */
+static inline int portid_compare(u32 x, u32 y, int chip_type)
+{
+	if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+		return (x ^ y) == 1;
+	return x == y;
+}
+
+/* Find or create the pci_controller_info for the controller owning
+ * the OBP node's portid, then initialize the PBM described by
+ * "node".  A controller is complete once both PBM A and PBM B have
+ * been probed; the first probe allocates the shared controller and
+ * per-PBM IOMMU state.  model_name is currently unused.
+ * (Fix: removed the local "is_pbm_a", which was assigned but never
+ * read -- schizo_pbm_init() determines A/B itself.)
+ */
+static void __init __schizo_init(int node, char *model_name, int chip_type)
+{
+	struct pci_controller_info *p;
+	struct pci_iommu *iommu;
+	u32 portid;
+
+	portid = prom_getintdefault(node, "portid", 0xff);
+
+	/* If this node's sibling PBM was probed already, attach to
+	 * the existing controller instead of creating a new one.
+	 */
+	for(p = pci_controller_root; p; p = p->next) {
+		struct pci_pbm_info *pbm;
+
+		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
+			continue;
+
+		pbm = (p->pbm_A.prom_node ?
+		       &p->pbm_A :
+		       &p->pbm_B);
+
+		if (portid_compare(pbm->portid, portid, chip_type)) {
+			schizo_pbm_init(p, node, portid, chip_type);
+			return;
+		}
+	}
+
+	/* First PBM of a new controller: allocate and zero the
+	 * controller record and the two per-PBM IOMMUs.  Allocation
+	 * failure this early is fatal.
+	 */
+	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+	if (!p) {
+		prom_printf("SCHIZO: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(p, 0, sizeof(*p));
+
+	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SCHIZO: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_A.iommu = iommu;
+
+	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SCHIZO: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_B.iommu = iommu;
+
+	p->next = pci_controller_root;
+	pci_controller_root = p;
+
+	p->index = pci_num_controllers++;
+	p->pbms_same_domain = 0;
+	p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
+		       tomatillo_scan_bus :
+		       schizo_scan_bus);
+	p->irq_build = schizo_irq_build;
+	p->base_address_update = schizo_base_address_update;
+	p->resource_adjust = schizo_resource_adjust;
+	p->pci_ops = &schizo_ops;
+
+	/* Like PSYCHO we have a 2GB aligned area for memory space. */
+	pci_memspace_mask = 0x7fffffffUL;
+
+	schizo_pbm_init(p, node, portid, chip_type);
+}
+
+/* Probe entry point for a SCHIZO controller OBP node. */
+void __init schizo_init(int node, char *model_name)
+{
+	__schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO);
+}
+
+/* Probe entry point for a SCHIZO+ controller OBP node. */
+void __init schizo_plus_init(int node, char *model_name)
+{
+	__schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO_PLUS);
+}
+
+/* Probe entry point for a TOMATILLO controller OBP node. */
+void __init tomatillo_init(int node, char *model_name)
+{
+	__schizo_init(node, model_name, PBM_CHIP_TYPE_TOMATILLO);
+}
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
new file mode 100644
index 0000000..52f14e3
--- /dev/null
+++ b/arch/sparc64/kernel/power.c
@@ -0,0 +1,150 @@
+/* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
+ * power.c: Power management driver.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/ebus.h>
+#include <asm/auxio.h>
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+
+/*
+ * sysctl - toggles the power-off restriction for serial-console
+ * systems in machine_power_off()
+ */
+int scons_pwroff = 1; 
+
+#ifdef CONFIG_PCI
+static void __iomem *power_reg;
+
+static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
+static int button_pressed;
+
+/* Power-button interrupt handler: latch the press (ignored until
+ * powerd consumes the previous one) and wake the powerd thread.
+ */
+static irqreturn_t power_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (button_pressed == 0) {
+		button_pressed = 1;
+		wake_up(&powerd_wait);
+	}
+
+	/* FIXME: Check registers for status... */
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI */
+
+extern void machine_halt(void);
+extern void machine_alt_power_off(void);
+static void (*poweroff_method)(void) = machine_alt_power_off;
+
+/* Power the machine off.  Powering off is skipped on serial-console
+ * systems unless scons_pwroff permits it.  With PCI and a mapped
+ * AUXIO power register we hit the hardware directly; otherwise the
+ * configured poweroff_method is used.  Falls back to machine_halt()
+ * if nothing powered us off.
+ */
+void machine_power_off(void)
+{
+	if (!serial_console || scons_pwroff) {
+#ifdef CONFIG_PCI
+		if (power_reg) {
+			/* Both register bits seem to have the
+			 * same effect, so until I figure out
+			 * what the difference is...
+			 */
+			writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
+		} else
+#endif /* CONFIG_PCI */
+			if (poweroff_method != NULL) {
+				poweroff_method();
+				/* not reached */
+			}
+	}
+	machine_halt();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+#ifdef CONFIG_PCI
+/* Kernel thread that sleeps until the power button is pressed and
+ * then runs /sbin/shutdown -h now.  If the exec fails it re-queues
+ * itself and waits for the next press.
+ */
+static int powerd(void *__unused)
+{
+	static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
+	DECLARE_WAITQUEUE(wait, current);
+
+	daemonize("powerd");
+
+	add_wait_queue(&powerd_wait, &wait);
+again:
+	/* Sleep until power_handler() sets button_pressed; signals
+	 * are flushed so they cannot keep us spinning.
+	 */
+	for (;;) {
+		set_task_state(current, TASK_INTERRUPTIBLE);
+		if (button_pressed)
+			break;
+		flush_signals(current);
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&powerd_wait, &wait);
+
+	/* Ok, down we go... */
+	button_pressed = 0;
+	if (execve("/sbin/shutdown", argv, envp) < 0) {
+		printk("powerd: shutdown execution failed\n");
+		add_wait_queue(&powerd_wait, &wait);
+		goto again;
+	}
+	return 0;
+}
+
+/* A "power" EBUS device can deliver button-press interrupts only
+ * when it has both a valid IRQ and a "button" PROM property.
+ * Returns 1 if so, 0 otherwise.
+ */
+static int __init has_button_interrupt(struct linux_ebus_device *edev)
+{
+	return (edev->irqs[0] != PCI_IRQ_NONE &&
+		prom_node_has_property(edev->prom_node, "button"));
+}
+
+/* Locate the "power" device on the EBUS, map its control register,
+ * switch the poweroff method to the standard halt, and -- if the
+ * device can interrupt on button press -- start the powerd thread
+ * and register the button IRQ handler.  Idempotent via "invoked".
+ */
+void __init power_init(void)
+{
+	struct linux_ebus *ebus;
+	struct linux_ebus_device *edev;
+	static int invoked;
+
+	if (invoked)
+		return;
+	invoked = 1;
+
+	for_each_ebus(ebus) {
+		for_each_ebusdev(edev, ebus) {
+			if (!strcmp(edev->prom_name, "power"))
+				goto found;
+		}
+	}
+	/* No power device; machine_power_off() will use the
+	 * firmware poweroff_method instead.
+	 */
+	return;
+
+found:
+	power_reg = ioremap(edev->resource[0].start, 0x4);
+	printk("power: Control reg at %p ... ", power_reg);
+	poweroff_method = machine_halt;  /* able to use the standard halt */
+	if (has_button_interrupt(edev)) {
+		if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
+			printk("Failed to start power daemon.\n");
+			return;
+		}
+		printk("powerd running.\n");
+
+		if (request_irq(edev->irqs[0],
+				power_handler, SA_SHIRQ, "power", NULL) < 0)
+			printk("power: Error, cannot register IRQ handler.\n");
+	} else {
+		printk("not using powerd.\n");
+	}
+}
+#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
new file mode 100644
index 0000000..26d3ec4
--- /dev/null
+++ b/arch/sparc64/kernel/process.c
@@ -0,0 +1,869 @@
+/*  $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
+ *  arch/sparc64/kernel/process.c
+ *
+ *  Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
+ *  Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/config.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+#include <asm/fpumacro.h>
+#include <asm/head.h>
+#include <asm/cpudata.h>
+#include <asm/unistd.h>
+
+/* #define VERBOSE_SHOWREGS */
+
+/*
+ * Nothing special yet...
+ */
+void default_idle(void)
+{
+	/* No low-power idle instruction is issued yet; simply return. */
+}
+
+#ifndef CONFIG_SMP
+
+/*
+ * the idle loop on a Sparc... ;)
+ */
+void cpu_idle(void)
+{
+	/* Only the boot idle task (pid 0) may enter the idle loop. */
+	if (current->pid != 0)
+		return;
+
+	/* endless idle loop with no priority at all */
+	for (;;) {
+		/* If current->work.need_resched is zero we should really
+		 * setup for a system wakup event and execute a shutdown
+		 * instruction.
+		 *
+		 * But this requires writing back the contents of the
+		 * L2 cache etc. so implement this later. -DaveM
+		 */
+		while (!need_resched())
+			barrier();
+
+		schedule();
+		check_pgt_cache();
+	}
+	return;
+}
+
+#else
+
+/*
+ * the idle loop on a UltraMultiPenguin...
+ */
+/* Bump / reset this CPU's idle-volume counter (read by IRQ handlers on
+ * other CPUs for the buddy redistribution algorithm).
+ */
+#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
+#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
+void cpu_idle(void)
+{
+	/* TIF_POLLING_NRFLAG tells the scheduler this CPU polls
+	 * need_resched() and does not require an IPI to be woken.
+	 */
+	set_thread_flag(TIF_POLLING_NRFLAG);
+	while(1) {
+		if (need_resched()) {
+			unidle_me();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+			schedule();
+			set_thread_flag(TIF_POLLING_NRFLAG);
+			check_pgt_cache();
+		}
+		idle_me_harder();
+
+		/* The store ordering is so that IRQ handlers on
+		 * other cpus see our increasing idleness for the buddy
+		 * redistribution algorithm.  -DaveM
+		 */
+		membar("#StoreStore | #StoreLoad");
+	}
+}
+
+#endif
+
+extern char reboot_command [];
+
+extern void (*prom_palette)(int);
+extern void (*prom_keyboard)(void);
+
+/* Halt the machine by dropping back into the PROM, first restoring the
+ * PROM's palette (on a graphical console) and keyboard handling so the
+ * firmware console is usable.
+ */
+void machine_halt(void)
+{
+	if (!serial_console && prom_palette)
+		prom_palette (1);
+	if (prom_keyboard)
+		prom_keyboard();
+	prom_halt();
+	/* prom_halt() should never return; if it does, something is
+	 * badly wrong.
+	 */
+	panic("Halt failed!");
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+/* Like machine_halt(), but asks the PROM to actually power the
+ * machine off rather than just halting it.
+ */
+void machine_alt_power_off(void)
+{
+	if (!serial_console && prom_palette)
+		prom_palette(1);
+	if (prom_keyboard)
+		prom_keyboard();
+	prom_halt_power_off();
+	/* prom_halt_power_off() should never return. */
+	panic("Power-off failed!");
+}
+
+/* Reboot via the PROM.  Boot arguments come, in order of preference,
+ * from the caller's 'cmd', from the 'reboot_command' kernel parameter,
+ * or default to an empty string.
+ */
+void machine_restart(char * cmd)
+{
+	char *p;
+	
+	/* Strip a trailing newline from the boot command, if any. */
+	p = strchr (reboot_command, '\n');
+	if (p) *p = 0;
+	if (!serial_console && prom_palette)
+		prom_palette (1);
+	if (prom_keyboard)
+		prom_keyboard();
+	if (cmd)
+		prom_reboot(cmd);
+	if (*reboot_command)
+		prom_reboot(reboot_command);
+	prom_reboot("");
+	/* prom_reboot() should never return. */
+	panic("Reboot failed!");
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+/* Dump the 32-bit user register window that the saved stack pointer
+ * (%o6, u_regs[14]) of a compat task points at.  The window is copied
+ * in from user space under USER_DS; on fault we bail out silently.
+ */
+static void show_regwindow32(struct pt_regs *regs)
+{
+	struct reg_window32 __user *rw;
+	struct reg_window32 r_w;
+	mm_segment_t old_fs;
+	
+	/* Spill all live register windows to the stack first. */
+	__asm__ __volatile__ ("flushw");
+	rw = compat_ptr((unsigned)regs->u_regs[14]);
+	old_fs = get_fs();
+	set_fs (USER_DS);
+	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+		set_fs (old_fs);
+		return;
+	}
+
+	set_fs (old_fs);
+	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
+	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
+	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
+	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
+	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
+	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
+	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
+	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
+}
+
+/* Dump the register window at the saved stack pointer.  Kernel and
+ * 64-bit user frames are handled here (user frames are copied in with
+ * copy_from_user); 32-bit user tasks are delegated to
+ * show_regwindow32().
+ */
+static void show_regwindow(struct pt_regs *regs)
+{
+	struct reg_window __user *rw;
+	struct reg_window *rwk;
+	struct reg_window r_w;
+	mm_segment_t old_fs;
+
+	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
+		/* Spill live windows, then form both user and kernel views
+		 * of the frame address (64-bit frames carry STACK_BIAS).
+		 */
+		__asm__ __volatile__ ("flushw");
+		rw = (struct reg_window __user *)
+			(regs->u_regs[14] + STACK_BIAS);
+		rwk = (struct reg_window *)
+			(regs->u_regs[14] + STACK_BIAS);
+		if (!(regs->tstate & TSTATE_PRIV)) {
+			/* User frame: must be fetched via copy_from_user. */
+			old_fs = get_fs();
+			set_fs (USER_DS);
+			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+				set_fs (old_fs);
+				return;
+			}
+			rwk = &r_w;
+			set_fs (old_fs);
+		}
+	} else {
+		show_regwindow32(regs);
+		return;
+	}
+	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
+	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
+	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
+	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
+	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
+	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+	/* For kernel frames, resolve the caller's return address. */
+	if (regs->tstate & TSTATE_PRIV)
+		print_symbol("I7: <%s>\n", rwk->ins[7]);
+}
+
+/* Dump a 64-bit stack frame: locals, ins, outgoing args, then every
+ * additional long word between the fixed frame area and the frame
+ * pointer.
+ */
+void show_stackframe(struct sparc_stackf *sf)
+{
+	unsigned long size;
+	unsigned long *stk;
+	int i;
+
+	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
+	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
+	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
+	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
+	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
+	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
+	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
+	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
+	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
+	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+	       sf->xxargs[0]);
+	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+	size -= STACKFRAME_SZ;
+	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
+	i = 0;
+	/* NOTE(review): the do/while assumes at least one extra word,
+	 * i.e. fp > sf + STACKFRAME_SZ; if size starts at 0 the unsigned
+	 * subtraction wraps — confirm callers never pass such frames.
+	 */
+	do {
+		printk("s%d: %016lx\n", i++, *stk++);
+	} while ((size -= sizeof(unsigned long)));
+}
+
+/* 32-bit (compat) variant of show_stackframe(): dump a sparc32 stack
+ * frame's locals, ins, outgoing args and trailing stack words.
+ */
+void show_stackframe32(struct sparc_stackf32 *sf)
+{
+	unsigned long size;
+	unsigned *stk;
+	int i;
+
+	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
+	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
+	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
+	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
+	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
+	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
+	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
+	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
+	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
+	       sf->structptr, sf->xargs[0], sf->xargs[1],
+	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+	       sf->xxargs[0]);
+	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+	size -= STACKFRAME32_SZ;
+	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
+	i = 0;
+	/* NOTE(review): same assumption as show_stackframe() — the frame
+	 * must carry at least one extra word or size wraps.
+	 */
+	do {
+		printk("s%d: %08x\n", i++, *stk++);
+	} while ((size -= sizeof(unsigned)));
+}
+
+#ifdef CONFIG_SMP
+/* Serializes register dumps so output from several CPUs does not
+ * interleave.
+ */
+static DEFINE_SPINLOCK(regdump_lock);
+#endif
+
+/* Print the full trap register state (tstate/tpc/tnpc/y, globals,
+ * outs, and the register window at %sp).  On SMP, local interrupts
+ * are disabled around the dump to avoid deadlocking against xcall
+ * IPIs while holding regdump_lock.
+ */
+void __show_regs(struct pt_regs * regs)
+{
+#ifdef CONFIG_SMP
+	unsigned long flags;
+
+	/* Protect against xcall ipis which might lead to livelock on the lock */
+	__asm__ __volatile__("rdpr      %%pstate, %0\n\t"
+			     "wrpr      %0, %1, %%pstate"
+			     : "=r" (flags)
+			     : "i" (PSTATE_IE));
+	spin_lock(&regdump_lock);
+#endif
+	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
+	       regs->tpc, regs->tnpc, regs->y, print_tainted());
+	print_symbol("TPC: <%s>\n", regs->tpc);
+	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+	       regs->u_regs[3]);
+	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
+	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+	       regs->u_regs[7]);
+	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
+	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+	       regs->u_regs[11]);
+	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+	       regs->u_regs[15]);
+	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
+	show_regwindow(regs);
+#ifdef CONFIG_SMP
+	spin_unlock(&regdump_lock);
+	/* Restore the saved %pstate (re-enables interrupts if they were
+	 * on when we entered).
+	 */
+	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
+			     : : "r" (flags));
+#endif
+}
+
+#ifdef VERBOSE_SHOWREGS
+/* Debug helper: dump the 9 instruction words around a user PC, with
+ * the word at the PC itself bracketed by '<' and '>'.  Misaligned PCs
+ * are ignored.
+ */
+static void idump_from_user (unsigned int *pc)
+{
+	int i;
+	int code;
+	
+	/* Instructions are 4-byte aligned; bail out otherwise. */
+	if((((unsigned long) pc) & 3))
+		return;
+	
+	pc -= 3;
+	for(i = -3; i < 6; i++) {
+		/* NOTE(review): get_user()'s return value is ignored — on
+		 * fault 'code' keeps its previous/stale value and is
+		 * printed anyway.  Acceptable for a debug-only dump.
+		 */
+		get_user(code, pc);
+		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
+		pc++;
+	}
+	printk("\n");
+}
+#endif
+
+/* Public register-dump entry point: dump this CPU's trap registers,
+ * then (on SMP) request register reports from all other CPUs.  With
+ * VERBOSE_SHOWREGS, additionally dump the parent trap frame if %sp
+ * points back into this task's kernel stack between etrap/etraptl1.
+ */
+void show_regs(struct pt_regs *regs)
+{
+#ifdef VERBOSE_SHOWREGS
+	extern long etrap, etraptl1;
+#endif
+	__show_regs(regs);
+#ifdef CONFIG_SMP
+	{
+		extern void smp_report_regs(void);
+
+		smp_report_regs();
+	}
+#endif
+
+#ifdef VERBOSE_SHOWREGS
+	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
+	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
+	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
+		printk ("*********parent**********\n");
+		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
+		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
+		printk ("*********endpar**********\n");
+	}
+#endif
+}
+
+/* Dump a 32-bit (compat) trap register set: psr/pc/npc/y plus the
+ * global and out registers.
+ */
+void show_regs32(struct pt_regs32 *regs)
+{
+	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x    %s\n", regs->psr,
+	       regs->pc, regs->npc, regs->y, print_tainted());
+	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
+	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+	       regs->u_regs[3]);
+	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
+	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+	       regs->u_regs[7]);
+	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
+	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+	       regs->u_regs[11]);
+	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
+	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+	       regs->u_regs[15]);
+}
+
+/* Return the saved program counter of a sleeping task, recovered by
+ * walking one frame up from the task's saved kernel stack pointer.
+ * Returns 0xdeadbeef if the stack looks bogus.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	struct thread_info *ti = tsk->thread_info;
+	unsigned long ret = 0xdeadbeefUL;
+	
+	if (ti && ti->ksp) {
+		unsigned long *sp;
+		/* 64-bit kernel stack pointers carry STACK_BIAS. */
+		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
+		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
+		    sp[14]) {
+			unsigned long *fp;
+			/* sp[14] is the saved frame pointer (%i6); fp[15]
+			 * is the caller's return address (%i7).
+			 */
+			fp = (unsigned long *)(sp[14] + STACK_BIAS);
+			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
+				ret = fp[15];
+		}
+	}
+	return ret;
+}
+
+/* Free current thread data structures etc.. */
+void exit_thread(void)
+{
+	struct thread_info *t = current_thread_info();
+
+	if (t->utraps) {
+		if (t->utraps[0] < 2)
+			kfree (t->utraps);
+		else
+			t->utraps[0]--;
+	}
+
+	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
+		t->user_cntd0 = t->user_cntd1 = NULL;
+		t->pcr_reg = 0;
+		write_pcr(0);
+	}
+}
+
+/* Reset per-thread state across exec(): resolve a pending 32/64-bit
+ * ABI switch, reload the MMU's pgd cache, drop saved user windows,
+ * performance counters, FPU state and signal disposition.
+ */
+void flush_thread(void)
+{
+	struct thread_info *t = current_thread_info();
+
+	/* A pending ABI change (set at exec time) toggles TIF_32BIT. */
+	if (t->flags & _TIF_ABI_PENDING)
+		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+
+	if (t->task->mm) {
+		unsigned long pgd_cache = 0UL;
+		if (test_thread_flag(TIF_32BIT)) {
+			struct mm_struct *mm = t->task->mm;
+			pgd_t *pgd0 = &mm->pgd[0];
+			pud_t *pud0 = pud_offset(pgd0, 0);
+
+			/* NOTE(review): pmd_alloc_one() result is used
+			 * unchecked — assumes allocation cannot fail here;
+			 * confirm.
+			 */
+			if (pud_none(*pud0)) {
+				pmd_t *page = pmd_alloc_one(mm, 0);
+				pud_set(pud0, page);
+			}
+			pgd_cache = get_pgd_cache(pgd0);
+		}
+		/* Write the new pgd cache value into the MMU (TSB_REG via
+		 * the DMMU ASI), ordered with a membar.
+		 */
+		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "r" (pgd_cache),
+				     "r" (TSB_REG),
+				     "i" (ASI_DMMU));
+	}
+	set_thread_wsaved(0);
+
+	/* Turn off performance counters if on. */
+	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
+		t->user_cntd0 = t->user_cntd1 = NULL;
+		t->pcr_reg = 0;
+		write_pcr(0);
+	}
+
+	/* Clear FPU register state. */
+	t->fpsaved[0] = 0;
+	
+	/* Ensure the thread runs with user (ASI_AIUS) data access. */
+	if (get_thread_current_ds() != ASI_AIUS)
+		set_fs(USER_DS);
+
+	/* Init new signal delivery disposition. */
+	clear_thread_flag(TIF_NEWSIGNALS);
+}
+
+/* It's a bit more tricky when 64-bit tasks are involved... */
+static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
+{
+	unsigned long fp, distance, rval;
+
+	if (!(test_thread_flag(TIF_32BIT))) {
+		csp += STACK_BIAS;
+		psp += STACK_BIAS;
+		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
+		fp += STACK_BIAS;
+	} else
+		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
+
+	/* Now 8-byte align the stack as this is mandatory in the
+	 * Sparc ABI due to how register windows work.  This hides
+	 * the restriction from thread libraries etc.  -DaveM
+	 */
+	csp &= ~7UL;
+
+	distance = fp - psp;
+	rval = (csp - distance);
+	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
+		rval = 0;
+	else if (test_thread_flag(TIF_32BIT)) {
+		if (put_user(((u32)csp),
+			     &(((struct reg_window32 __user *)rval)->ins[6])))
+			rval = 0;
+	} else {
+		if (put_user(((u64)csp - STACK_BIAS),
+			     &(((struct reg_window __user *)rval)->ins[6])))
+			rval = 0;
+		else
+			rval = rval - STACK_BIAS;
+	}
+
+	return rval;
+}
+
+/* Standard stuff. */
+/* Drop the saved user window at index 'first_win' by sliding entries
+ * [first_win+1, last_win] down one slot; the spill stack pointers and
+ * the window images are shifted in lock step.
+ */
+static inline void shift_window_buffer(int first_win, int last_win,
+				       struct thread_info *t)
+{
+	int win = first_win;
+
+	while (win < last_win) {
+		t->rwbuf_stkptrs[win] = t->rwbuf_stkptrs[win + 1];
+		memcpy(&t->reg_window[win], &t->reg_window[win + 1],
+		       sizeof(struct reg_window));
+		win++;
+	}
+}
+
+/* Try to push any register windows buffered in the thread_info out to
+ * the user stack.  Windows whose stack page is not writable right now
+ * are simply kept buffered (unlike fault_in_user_windows(), this never
+ * kills the task).
+ */
+void synchronize_user_stack(void)
+{
+	struct thread_info *t = current_thread_info();
+	unsigned long window;
+
+	flush_user_windows();
+	if ((window = get_thread_wsaved()) != 0) {
+		int winsize = sizeof(struct reg_window);
+		int bias = 0;
+
+		/* 32-bit tasks use the smaller window layout and an
+		 * unbiased stack pointer.
+		 */
+		if (test_thread_flag(TIF_32BIT))
+			winsize = sizeof(struct reg_window32);
+		else
+			bias = STACK_BIAS;
+
+		window -= 1;
+		do {
+			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+			struct reg_window *rwin = &t->reg_window[window];
+
+			/* On success, compact the buffer over this slot. */
+			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
+				shift_window_buffer(window, get_thread_wsaved() - 1, t);
+				set_thread_wsaved(get_thread_wsaved() - 1);
+			}
+		} while (window--);
+	}
+}
+
+/* Force every buffered user register window out to the user stack.
+ * Unlike synchronize_user_stack(), failure to write a window is fatal:
+ * the task is killed with SIGILL.
+ */
+void fault_in_user_windows(void)
+{
+	struct thread_info *t = current_thread_info();
+	unsigned long window;
+	int winsize = sizeof(struct reg_window);
+	int bias = 0;
+
+	/* 32-bit tasks use the smaller window layout and an unbiased
+	 * stack pointer.
+	 */
+	if (test_thread_flag(TIF_32BIT))
+		winsize = sizeof(struct reg_window32);
+	else
+		bias = STACK_BIAS;
+
+	flush_user_windows();
+	window = get_thread_wsaved();
+
+	if (window != 0) {
+		window -= 1;
+		do {
+			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+			struct reg_window *rwin = &t->reg_window[window];
+
+			if (copy_to_user((char __user *)sp, rwin, winsize))
+				goto barf;
+		} while (window--);
+	}
+	set_thread_wsaved(0);
+	return;
+
+barf:
+	/* Record how many windows remain unspilled, then kill the task. */
+	set_thread_wsaved(window + 1);
+	do_exit(SIGILL);
+}
+
+/* Arch entry point for fork/vfork/clone: extract the parent/child TID
+ * pointer arguments from the trap registers (compat_ptr-widened for
+ * 32-bit tasks) and hand off to the generic do_fork().
+ */
+asmlinkage long sparc_do_fork(unsigned long clone_flags,
+			      unsigned long stack_start,
+			      struct pt_regs *regs,
+			      unsigned long stack_size)
+{
+	int __user *parent_tid_ptr, *child_tid_ptr;
+
+#ifdef CONFIG_COMPAT
+	if (test_thread_flag(TIF_32BIT)) {
+		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
+		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
+	} else
+#endif
+	{
+		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
+		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
+	}
+
+	return do_fork(clone_flags, stack_start,
+		       regs, stack_size,
+		       parent_tid_ptr, child_tid_ptr);
+}
+
+/* Copy a Sparc thread.  The fork() return value conventions
+ * under SunOS are nothing short of bletcherous:
+ * Parent -->  %o0 == childs  pid, %o1 == 0
+ * Child  -->  %o0 == parents pid, %o1 == 1
+ */
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+		unsigned long unused,
+		struct task_struct *p, struct pt_regs *regs)
+{
+	struct thread_info *t = p->thread_info;
+	char *child_trap_frame;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	p->thread.smp_lock_count = 0;
+	p->thread.smp_lock_pc = 0;
+#endif
+
+	/* Calculate offset to stack_frame & pt_regs */
+	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
+	/* Copy the parent's trap frame (stack frame + pt_regs) to the
+	 * top of the child's kernel stack.
+	 */
+	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
+
+	/* Rebuild the thread flags: clear the CWP and CURRENT_DS fields,
+	 * mark the thread as a fresh child, and install the window
+	 * pointer the child will run in (parent's CWP advanced by one).
+	 */
+	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
+		_TIF_NEWCHILD |
+		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
+	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
+	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
+	t->fpsaved[0] = 0;	/* child starts with no saved FPU state */
+
+	if (regs->tstate & TSTATE_PRIV) {
+		/* Special case, if we are spawning a kernel thread from
+		 * a userspace task (via KMOD, NFS, or similar) we must
+		 * disable performance counters in the child because the
+		 * address space and protection realm are changing.
+		 */
+		if (t->flags & _TIF_PERFCTR) {
+			t->user_cntd0 = t->user_cntd1 = NULL;
+			t->pcr_reg = 0;
+			t->flags &= ~_TIF_PERFCTR;
+		}
+		/* Kernel thread: frame pointer on the kernel stack,
+		 * kernel (ASI_P) data access.
+		 */
+		t->kregs->u_regs[UREG_FP] = t->ksp;
+		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
+		flush_register_windows();
+		memcpy((void *)(t->ksp + STACK_BIAS),
+		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
+		       sizeof(struct sparc_stackf));
+		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
+		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
+	} else {
+		if (t->flags & _TIF_32BIT) {
+			/* 32-bit task: stack addresses live in the low 4GB. */
+			sp &= 0x00000000ffffffffUL;
+			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+		}
+		t->kregs->u_regs[UREG_FP] = sp;
+		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
+		if (sp != regs->u_regs[UREG_FP]) {
+			/* Caller supplied a different user stack: build a
+			 * fresh stack frame there for the child.
+			 */
+			unsigned long csp;
+
+			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
+			if (!csp)
+				return -EFAULT;
+			t->kregs->u_regs[UREG_FP] = csp;
+		}
+		if (t->utraps)
+			t->utraps[0]++;	/* extra ref on shared utrap table */
+	}
+
+	/* Set the return value for the child. */
+	t->kregs->u_regs[UREG_I0] = current->pid;
+	t->kregs->u_regs[UREG_I1] = 1;
+
+	/* Set the second return value for the parent. */
+	regs->u_regs[UREG_I1] = 0;
+
+	if (clone_flags & CLONE_SETTLS)
+		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
+
+	return 0;
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process(ie the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ */
+/* Create a kernel thread running fn(arg).  Implemented directly in
+ * assembly: it issues the clone syscall trap, and in the child jumps
+ * to 'fn' and then traps into exit() with fn's return value.  Returns
+ * the child's pid (or negative errno) in the parent.
+ */
+pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	long retval;
+
+	/* If the parent runs before fn(arg) is called by the child,
+	 * the input registers of this function can be clobbered.
+	 * So we stash 'fn' and 'arg' into global registers which
+	 * will not be modified by the parent.
+	 */
+	__asm__ __volatile__("mov %4, %%g2\n\t"	   /* Save FN into global */
+			     "mov %5, %%g3\n\t"	   /* Save ARG into global */
+			     "mov %1, %%g1\n\t"	   /* Clone syscall nr. */
+			     "mov %2, %%o0\n\t"	   /* Clone flags. */
+			     "mov 0, %%o1\n\t"	   /* usp arg == 0 */
+			     "t 0x6d\n\t"	   /* Linux/Sparc clone(). */
+			     "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
+			     " mov %%o0, %0\n\t"
+			     "jmpl %%g2, %%o7\n\t"   /* Call the function. */
+			     " mov %%g3, %%o0\n\t"   /* Set arg in delay. */
+			     "mov %3, %%g1\n\t"
+			     "t 0x6d\n\t"	   /* Linux/Sparc exit(). */
+			     /* Notreached by child. */
+			     "1:" :
+			     "=r" (retval) :
+			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
+			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
+			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
+	return retval;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+/* Fill in the a.out core-dump user structure.  Only ancient SunOS and
+ * a.out SparcLinux binaries would ever need real contents here, so the
+ * structure is simply zero-filled.
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+	memset(dump, 0, sizeof(*dump));
+}
+
+/* 32-bit (sparc32 compat) ELF FPU register-set layout, used by
+ * dump_fpu() when core dumping a TIF_32BIT task.
+ */
+typedef struct {
+	union {
+		unsigned int	pr_regs[32];	/* %f0-%f31 as 32-bit words */
+		unsigned long	pr_dregs[16];	/* same storage, as doubles */
+	} pr_fr;
+	unsigned int __unused;
+	unsigned int	pr_fsr;		/* FPU status register */
+	unsigned char	pr_qcnt;	/* number of valid pr_q entries */
+	unsigned char	pr_q_entrysize;	/* bytes per pr_q entry */
+	unsigned char	pr_en;		/* non-zero if FPU was enabled */
+	unsigned int	pr_q[64];	/* FPU queue image */
+} elf_fpregset_t32;
+
+/*
+ * fill in the fpu structure for a core dump.
+ */
+/* Fill the ELF FPU register set for a core dump from the saved
+ * per-thread FPU state.  Uses the 32-bit layout for compat tasks.
+ * Always reports success (returns 1).
+ */
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
+{
+	unsigned long *kfpregs = current_thread_info()->fpregs;
+	/* fpsaved[0] holds the FPRS bits telling which register halves
+	 * (DL = lower, DU = upper) and status were actually saved.
+	 */
+	unsigned long fprs = current_thread_info()->fpsaved[0];
+
+	if (test_thread_flag(TIF_32BIT)) {
+		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
+
+		/* Lower 32 float registers, or zeros if never saved. */
+		if (fprs & FPRS_DL)
+			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
+			       sizeof(unsigned int) * 32);
+		else
+			memset(&fpregs32->pr_fr.pr_regs[0], 0,
+			       sizeof(unsigned int) * 32);
+		fpregs32->pr_qcnt = 0;
+		fpregs32->pr_q_entrysize = 8;
+		memset(&fpregs32->pr_q[0], 0,
+		       (sizeof(unsigned int) * 64));
+		if (fprs & FPRS_FEF) {
+			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
+			fpregs32->pr_en = 1;
+		} else {
+			fpregs32->pr_fsr = 0;
+			fpregs32->pr_en = 0;
+		}
+	} else {
+		/* 64-bit layout: lower and upper halves saved separately. */
+		if(fprs & FPRS_DL)
+			memcpy(&fpregs->pr_regs[0], kfpregs,
+			       sizeof(unsigned int) * 32);
+		else
+			memset(&fpregs->pr_regs[0], 0,
+			       sizeof(unsigned int) * 32);
+		if(fprs & FPRS_DU)
+			memcpy(&fpregs->pr_regs[16], kfpregs+16,
+			       sizeof(unsigned int) * 32);
+		else
+			memset(&fpregs->pr_regs[16], 0,
+			       sizeof(unsigned int) * 32);
+		if(fprs & FPRS_FEF) {
+			fpregs->pr_fsr = current_thread_info()->xfsr[0];
+			fpregs->pr_gsr = current_thread_info()->gsr[0];
+		} else {
+			fpregs->pr_fsr = fpregs->pr_gsr = 0;
+		}
+		fpregs->pr_fprs = fprs;
+	}
+	return 1;
+}
+
+/*
+ * sparc_execve() executes a new program after the asm stub has set
+ * things up for us.  This should basically do what I want it to.
+ */
+/*
+ * sparc_execve() executes a new program after the asm stub has set
+ * things up for us.  This should basically do what I want it to.
+ */
+asmlinkage int sparc_execve(struct pt_regs *regs)
+{
+	int error, base = 0;
+	char *filename;
+
+	/* User register window flush is done by entry.S */
+
+	/* Check for indirect call.  With %g1 == 0 the real arguments
+	 * start one register later (%i1 instead of %i0).
+	 */
+	if (regs->u_regs[UREG_G1] == 0)
+		base = 1;
+
+	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename,
+			  (char __user * __user *)
+			  regs->u_regs[base + UREG_I1],
+			  (char __user * __user *)
+			  regs->u_regs[base + UREG_I2], regs);
+	putname(filename);
+	if (!error) {
+		/* Fresh image: reset FPU state, disable FPU in tstate,
+		 * and clear the syscall-trace-on-exec flag.
+		 */
+		fprs_write(0);
+		current_thread_info()->xfsr[0] = 0;
+		current_thread_info()->fpsaved[0] = 0;
+		regs->tstate &= ~TSTATE_PEF;
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+	}
+out:
+	return error;
+}
+
+/* Return the first PC outside the scheduler that a sleeping task will
+ * resume at, by walking up to 16 frames of its kernel stack.  Returns
+ * 0 for running tasks, the current task, or an unwalkable stack.
+ */
+unsigned long get_wchan(struct task_struct *task)
+{
+	unsigned long pc, fp, bias = 0;
+	unsigned long thread_info_base;
+	struct reg_window *rw;
+        unsigned long ret = 0;
+	int count = 0; 
+
+	if (!task || task == current ||
+            task->state == TASK_RUNNING)
+		goto out;
+
+	thread_info_base = (unsigned long) task->thread_info;
+	bias = STACK_BIAS;
+	fp = task->thread_info->ksp + bias;
+
+	do {
+		/* Bogus frame pointer? */
+		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
+		    fp >= (thread_info_base + THREAD_SIZE))
+			break;
+		rw = (struct reg_window *) fp;
+		/* ins[7] is the frame's return address, ins[6] the
+		 * caller's frame pointer.
+		 */
+		pc = rw->ins[7];
+		if (!in_sched_functions(pc)) {
+			ret = pc;
+			goto out;
+		}
+		fp = rw->ins[6] + bias;
+	} while (++count < 16);
+
+out:
+	return ret;
+}
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
new file mode 100644
index 0000000..1722dc5
--- /dev/null
+++ b/arch/sparc64/kernel/ptrace.c
@@ -0,0 +1,646 @@
+/* ptrace.c: Sparc process tracing support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
+ * and David Mosberger.
+ *
+ * Added Linux support -miguel (weird, eh?, the original code was meant
+ * to emulate SunOS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/security.h>
+
+#include <asm/asi.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/psrcompat.h>
+#include <asm/visasm.h>
+#include <asm/spitfire.h>
+
+/* Returning from ptrace is a bit tricky because the syscall return
+ * low level code assumes any value returned which is negative and
+ * is a valid errno will mean setting the condition codes to indicate
+ * an error return.  This doesn't work, so we have this hook.
+ */
+static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
+{
+	regs->u_regs[UREG_I0] = error;
+	regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+}
+
+/* Hand a success value back to the traced process: clear both carry
+ * condition bits and step the trap PC past the trap instruction.
+ */
+static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
+{
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+	regs->u_regs[UREG_I0] = value;
+}
+
+/* Linux-style ptrace success: store 'value' at the user address
+ * 'addr' (32-bit store for compat tasks), then return 0 with the
+ * carry bits cleared.  Faults on the store become EFAULT.
+ */
+static inline void
+pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
+{
+	if (test_thread_flag(TIF_32BIT)) {
+		if (put_user(value, (unsigned int __user *) addr)) {
+			pt_error_return(regs, EFAULT);
+			return;
+		}
+	} else {
+		if (put_user(value, (long __user *) addr)) {
+			pt_error_return(regs, EFAULT);
+			return;
+		}
+	}
+	regs->u_regs[UREG_I0] = 0;
+	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+}
+
+/* Dispatch a ptrace success by personality: SunOS returns the value
+ * in %o0 directly, Linux stores it at the user-supplied address.
+ */
+static void
+pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
+{
+	if (current->personality != PER_SUNOS)
+		pt_succ_return_linux (regs, val, addr);
+	else
+		pt_succ_return (regs, val);
+}
+
+/* #define ALLOW_INIT_TRACING */
+/* #define DEBUG_PTRACE */
+
+#ifdef DEBUG_PTRACE
+/* Human-readable names for ptrace request codes 0-25, indexed by
+ * request number; used only by the DEBUG_PTRACE trace output.
+ */
+char *pt_rq [] = {
+	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
+	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
+	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
+	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
+	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
+	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
+	/* 24 */ "SYSCALL", ""
+};
+#endif
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* nothing to do */
+}
+
+asmlinkage void do_ptrace(struct pt_regs *regs)
+{
+	int request = regs->u_regs[UREG_I0];
+	pid_t pid = regs->u_regs[UREG_I1];
+	unsigned long addr = regs->u_regs[UREG_I2];
+	unsigned long data = regs->u_regs[UREG_I3];
+	unsigned long addr2 = regs->u_regs[UREG_I4];
+	struct task_struct *child;
+	int ret;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		addr &= 0xffffffffUL;
+		data &= 0xffffffffUL;
+		addr2 &= 0xffffffffUL;
+	}
+	lock_kernel();
+#ifdef DEBUG_PTRACE
+	{
+		char *s;
+
+		if ((request >= 0) && (request <= 24))
+			s = pt_rq [request];
+		else
+			s = "unknown";
+
+		if (request == PTRACE_POKEDATA && data == 0x91d02001){
+			printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
+				pid, addr, addr2);
+		} else 
+			printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
+			       s, request, pid, addr, data, addr2);
+	}
+#endif
+	if (request == PTRACE_TRACEME) {
+		int ret;
+
+		/* are we already being traced? */
+		if (current->ptrace & PT_PTRACED) {
+			pt_error_return(regs, EPERM);
+			goto out;
+		}
+		ret = security_ptrace(current->parent, current);
+		if (ret) {
+			pt_error_return(regs, -ret);
+			goto out;
+		}
+
+		/* set the ptrace bit in the process flags. */
+		current->ptrace |= PT_PTRACED;
+		pt_succ_return(regs, 0);
+		goto out;
+	}
+#ifndef ALLOW_INIT_TRACING
+	if (pid == 1) {
+		/* Can't dork with init. */
+		pt_error_return(regs, EPERM);
+		goto out;
+	}
+#endif
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+
+	if (!child) {
+		pt_error_return(regs, ESRCH);
+		goto out;
+	}
+
+	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+		if (ptrace_attach(child)) {
+			pt_error_return(regs, EPERM);
+			goto out_tsk;
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0) {
+		pt_error_return(regs, -ret);
+		goto out_tsk;
+	}
+
+	if (!(test_thread_flag(TIF_32BIT))	&&
+	    ((request == PTRACE_READDATA64)		||
+	     (request == PTRACE_WRITEDATA64)		||
+	     (request == PTRACE_READTEXT64)		||
+	     (request == PTRACE_WRITETEXT64)		||
+	     (request == PTRACE_PEEKTEXT64)		||
+	     (request == PTRACE_POKETEXT64)		||
+	     (request == PTRACE_PEEKDATA64)		||
+	     (request == PTRACE_POKEDATA64))) {
+		addr = regs->u_regs[UREG_G2];
+		addr2 = regs->u_regs[UREG_G3];
+		request -= 30; /* wheee... */
+	}
+
+	switch(request) {
+	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
+	case PTRACE_PEEKDATA: {
+		unsigned long tmp64;
+		unsigned int tmp32;
+		int res, copied;
+
+		res = -EIO;
+		if (test_thread_flag(TIF_32BIT)) {
+			copied = access_process_vm(child, addr,
+						   &tmp32, sizeof(tmp32), 0);
+			tmp64 = (unsigned long) tmp32;
+			if (copied == sizeof(tmp32))
+				res = 0;
+		} else {
+			copied = access_process_vm(child, addr,
+						   &tmp64, sizeof(tmp64), 0);
+			if (copied == sizeof(tmp64))
+				res = 0;
+		}
+		if (res < 0)
+			pt_error_return(regs, -res);
+		else
+			pt_os_succ_return(regs, tmp64, (void __user *) data);
+		goto flush_and_out;
+	}
+
+	case PTRACE_POKETEXT: /* write the word at location addr. */
+	case PTRACE_POKEDATA: {
+		unsigned long tmp64;
+		unsigned int tmp32;
+		int copied, res = -EIO;
+
+		if (test_thread_flag(TIF_32BIT)) {
+			tmp32 = data;
+			copied = access_process_vm(child, addr,
+						   &tmp32, sizeof(tmp32), 1);
+			if (copied == sizeof(tmp32))
+				res = 0;
+		} else {
+			tmp64 = data;
+			copied = access_process_vm(child, addr,
+						   &tmp64, sizeof(tmp64), 1);
+			if (copied == sizeof(tmp64))
+				res = 0;
+		}
+		if (res < 0)
+			pt_error_return(regs, -res);
+		else
+			pt_succ_return(regs, res);
+		goto flush_and_out;
+	}
+
+	case PTRACE_GETREGS: {
+		struct pt_regs32 __user *pregs =
+			(struct pt_regs32 __user *) addr;
+		struct pt_regs *cregs = child->thread_info->kregs;
+		int rval;
+
+		if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
+		    __put_user(cregs->tpc, (&pregs->pc)) ||
+		    __put_user(cregs->tnpc, (&pregs->npc)) ||
+		    __put_user(cregs->y, (&pregs->y))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		for (rval = 1; rval < 16; rval++)
+			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+				pt_error_return(regs, EFAULT);
+				goto out_tsk;
+			}
+		pt_succ_return(regs, 0);
+#ifdef DEBUG_PTRACE
+		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+#endif
+		goto out_tsk;
+	}
+
+	case PTRACE_GETREGS64: {
+		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+		struct pt_regs *cregs = child->thread_info->kregs;
+		unsigned long tpc = cregs->tpc;
+		int rval;
+
+		if ((child->thread_info->flags & _TIF_32BIT) != 0)
+			tpc &= 0xffffffff;
+		if (__put_user(cregs->tstate, (&pregs->tstate)) ||
+		    __put_user(tpc, (&pregs->tpc)) ||
+		    __put_user(cregs->tnpc, (&pregs->tnpc)) ||
+		    __put_user(cregs->y, (&pregs->y))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		for (rval = 1; rval < 16; rval++)
+			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+				pt_error_return(regs, EFAULT);
+				goto out_tsk;
+			}
+		pt_succ_return(regs, 0);
+#ifdef DEBUG_PTRACE
+		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+#endif
+		goto out_tsk;
+	}
+
+	case PTRACE_SETREGS: {
+		struct pt_regs32 __user *pregs =
+			(struct pt_regs32 __user *) addr;
+		struct pt_regs *cregs = child->thread_info->kregs;
+		unsigned int psr, pc, npc, y;
+		int i;
+
+		/* Must be careful, tracing process can only set certain
+		 * bits in the psr.
+		 */
+		if (__get_user(psr, (&pregs->psr)) ||
+		    __get_user(pc, (&pregs->pc)) ||
+		    __get_user(npc, (&pregs->npc)) ||
+		    __get_user(y, (&pregs->y))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		cregs->tstate &= ~(TSTATE_ICC);
+		cregs->tstate |= psr_to_tstate_icc(psr);
+               	if (!((pc | npc) & 3)) {
+			cregs->tpc = pc;
+			cregs->tnpc = npc;
+		}
+		cregs->y = y;
+		for (i = 1; i < 16; i++) {
+			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+				pt_error_return(regs, EFAULT);
+				goto out_tsk;
+			}
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_SETREGS64: {
+		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+		struct pt_regs *cregs = child->thread_info->kregs;
+		unsigned long tstate, tpc, tnpc, y;
+		int i;
+
+		/* Must be careful, tracing process can only set certain
+		 * bits in the psr.
+		 */
+		if (__get_user(tstate, (&pregs->tstate)) ||
+		    __get_user(tpc, (&pregs->tpc)) ||
+		    __get_user(tnpc, (&pregs->tnpc)) ||
+		    __get_user(y, (&pregs->y))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		if ((child->thread_info->flags & _TIF_32BIT) != 0) {
+			tpc &= 0xffffffff;
+			tnpc &= 0xffffffff;
+		}
+		tstate &= (TSTATE_ICC | TSTATE_XCC);
+		cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+		cregs->tstate |= tstate;
+		if (!((tpc | tnpc) & 3)) {
+			cregs->tpc = tpc;
+			cregs->tnpc = tnpc;
+		}
+		cregs->y = y;
+		for (i = 1; i < 16; i++) {
+			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+				pt_error_return(regs, EFAULT);
+				goto out_tsk;
+			}
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_GETFPREGS: {
+		struct fps {
+			unsigned int regs[32];
+			unsigned int fsr;
+			unsigned int flags;
+			unsigned int extra;
+			unsigned int fpqd;
+			struct fq {
+				unsigned int insnaddr;
+				unsigned int insn;
+			} fpq[16];
+		};
+		struct fps __user *fps = (struct fps __user *) addr;
+		unsigned long *fpregs = child->thread_info->fpregs;
+
+		if (copy_to_user(&fps->regs[0], fpregs,
+				 (32 * sizeof(unsigned int))) ||
+		    __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
+		    __put_user(0, (&fps->fpqd)) ||
+		    __put_user(0, (&fps->flags)) ||
+		    __put_user(0, (&fps->extra)) ||
+		    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_GETFPREGS64: {
+		struct fps {
+			unsigned int regs[64];
+			unsigned long fsr;
+		};
+		struct fps __user *fps = (struct fps __user *) addr;
+		unsigned long *fpregs = child->thread_info->fpregs;
+
+		if (copy_to_user(&fps->regs[0], fpregs,
+				 (64 * sizeof(unsigned int))) ||
+		    __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_SETFPREGS: {
+		struct fps {
+			unsigned int regs[32];
+			unsigned int fsr;
+			unsigned int flags;
+			unsigned int extra;
+			unsigned int fpqd;
+			struct fq {
+				unsigned int insnaddr;
+				unsigned int insn;
+			} fpq[16];
+		};
+		struct fps __user *fps = (struct fps __user *) addr;
+		unsigned long *fpregs = child->thread_info->fpregs;
+		unsigned fsr;
+
+		if (copy_from_user(fpregs, &fps->regs[0],
+				   (32 * sizeof(unsigned int))) ||
+		    __get_user(fsr, (&fps->fsr))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
+		child->thread_info->xfsr[0] |= fsr;
+		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
+			child->thread_info->gsr[0] = 0;
+		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_SETFPREGS64: {
+		struct fps {
+			unsigned int regs[64];
+			unsigned long fsr;
+		};
+		struct fps __user *fps = (struct fps __user *) addr;
+		unsigned long *fpregs = child->thread_info->fpregs;
+
+		if (copy_from_user(fpregs, &fps->regs[0],
+				   (64 * sizeof(unsigned int))) ||
+		    __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+			pt_error_return(regs, EFAULT);
+			goto out_tsk;
+		}
+		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
+			child->thread_info->gsr[0] = 0;
+		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_READTEXT:
+	case PTRACE_READDATA: {
+		int res = ptrace_readdata(child, addr,
+					  (char __user *)addr2, data);
+		if (res == data) {
+			pt_succ_return(regs, 0);
+			goto flush_and_out;
+		}
+		if (res >= 0)
+			res = -EIO;
+		pt_error_return(regs, -res);
+		goto flush_and_out;
+	}
+
+	case PTRACE_WRITETEXT:
+	case PTRACE_WRITEDATA: {
+		int res = ptrace_writedata(child, (char __user *) addr2,
+					   addr, data);
+		if (res == data) {
+			pt_succ_return(regs, 0);
+			goto flush_and_out;
+		}
+		if (res >= 0)
+			res = -EIO;
+		pt_error_return(regs, -res);
+		goto flush_and_out;
+	}
+	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
+		addr = 1;
+
+	case PTRACE_CONT: { /* restart after signal. */
+		if (data > _NSIG) {
+			pt_error_return(regs, EIO);
+			goto out_tsk;
+		}
+		if (addr != 1) {
+			unsigned long pc_mask = ~0UL;
+
+			if ((child->thread_info->flags & _TIF_32BIT) != 0)
+				pc_mask = 0xffffffff;
+
+			if (addr & 3) {
+				pt_error_return(regs, EINVAL);
+				goto out_tsk;
+			}
+#ifdef DEBUG_PTRACE
+			printk ("Original: %016lx %016lx\n",
+				child->thread_info->kregs->tpc,
+				child->thread_info->kregs->tnpc);
+			printk ("Continuing with %016lx %016lx\n", addr, addr+4);
+#endif
+			child->thread_info->kregs->tpc = (addr & pc_mask);
+			child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
+		}
+
+		if (request == PTRACE_SYSCALL) {
+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		} else {
+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		}
+
+		child->exit_code = data;
+#ifdef DEBUG_PTRACE
+		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
+			child->pid, child->exit_code,
+			child->thread_info->kregs->tpc,
+			child->thread_info->kregs->tnpc);
+		       
+#endif
+		wake_up_process(child);
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+/*
+ * make the child exit.  Best I can do is send it a sigkill. 
+ * perhaps it should be put in the status that it wants to 
+ * exit.
+ */
+	case PTRACE_KILL: {
+		if (child->exit_state == EXIT_ZOMBIE) {	/* already dead */
+			pt_succ_return(regs, 0);
+			goto out_tsk;
+		}
+		child->exit_code = SIGKILL;
+		wake_up_process(child);
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
+		int error = ptrace_detach(child, data);
+		if (error) {
+			pt_error_return(regs, EIO);
+			goto out_tsk;
+		}
+		pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+
+	/* PTRACE_DUMPCORE unsupported... */
+
+	default: {
+		int err = ptrace_request(child, request, addr, data);
+		if (err)
+			pt_error_return(regs, -err);
+		else
+			pt_succ_return(regs, 0);
+		goto out_tsk;
+	}
+	}
+flush_and_out:
+	{
+		unsigned long va;
+
+		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+			for (va = 0; va < (1 << 16); va += (1 << 5))
+				spitfire_put_dcache_tag(va, 0x0);
+			/* No need to mess with I-cache on Cheetah. */
+		} else {
+			for (va =  0; va < L1DCACHE_SIZE; va += 32)
+				spitfire_put_dcache_tag(va, 0x0);
+			if (request == PTRACE_PEEKTEXT ||
+			    request == PTRACE_POKETEXT ||
+			    request == PTRACE_READTEXT ||
+			    request == PTRACE_WRITETEXT) {
+				for (va =  0; va < (PAGE_SIZE << 1); va += 32)
+					spitfire_put_icache_tag(va, 0x0);
+				__asm__ __volatile__("flush %g6");
+			}
+		}
+	}
+out_tsk:
+	if (child)
+		put_task_struct(child);
+out:
+	unlock_kernel();
+}
+
+/* Entry/exit hook for syscall tracing.  Notifies the tracer with
+ * SIGTRAP (ORed with 0x80 when PT_TRACESYSGOOD is set so the tracer
+ * can tell syscall stops from real SIGTRAPs), then forwards any
+ * signal the tracer asked us to deliver via exit_code.
+ */
+asmlinkage void syscall_trace(void)
+{
+	int notify_sig;
+
+#ifdef DEBUG_PTRACE
+	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
+#endif
+	/* Nothing to do unless this task is being syscall-traced
+	 * by an attached tracer.
+	 */
+	if (!test_thread_flag(TIF_SYSCALL_TRACE) ||
+	    !(current->ptrace & PT_PTRACED))
+		return;
+
+	notify_sig = SIGTRAP;
+	if (current->ptrace & PT_TRACESYSGOOD)
+		notify_sig |= 0x80;
+	ptrace_notify(notify_sig);
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+#ifdef DEBUG_PTRACE
+	printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
+		current->pid, current->exit_code);
+#endif
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
new file mode 100644
index 0000000..0696ed4
--- /dev/null
+++ b/arch/sparc64/kernel/rtrap.S
@@ -0,0 +1,362 @@
+/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
+ * rtrap.S: Preparing for return from trap on Sparc V9.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+#include <asm/visasm.h>
+#include <asm/processor.h>
+
+#define		RTRAP_PSTATE		(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define		RTRAP_PSTATE_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
+#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
+		/* Register %l6 keeps track of whether we are returning
+		 * from a system call or not.  It is cleared if we call
+		 * do_notify_resume, and it must not be otherwise modified
+		 * until we fully commit to returning to userspace.
+		 */
+
+		.text
+		.align			32
+/* Slow path: run pending softirqs, then rejoin the rtrap fast path.
+ * (The instruction after each "call" executes in its delay slot.)
+ */
+__handle_softirq:
+		call			do_softirq
+		 nop
+		ba,a,pt			%xcc, __handle_softirq_continue
+		 nop
+/* Slow path: re-enable interrupts (RTRAP_PSTATE) around the call to
+ * schedule(), then mask them again (RTRAP_PSTATE_IRQOFF) before
+ * redoing the pending-work checks at __handle_preemption_continue.
+ */
+__handle_preemption:
+		call			schedule
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		ba,pt			%xcc, __handle_preemption_continue
+		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+
+/* Slow path: spill any user register windows still saved in the
+ * thread_info (TI_WSAVED != 0) with interrupts enabled, then redo
+ * the reschedule and signal checks since we dropped the IRQ-off
+ * atomicity while calling into C code.
+ */
+__handle_user_windows:
+		call			fault_in_user_windows
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		/* Redo sched+sig checks */
+		ldx			[%g6 + TI_FLAGS], %l0
+		andcc			%l0, _TIF_NEED_RESCHED, %g0
+
+		be,pt			%xcc, 1f
+		 nop
+		call			schedule
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		ldx			[%g6 + TI_FLAGS], %l0
+
+1:		andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+		be,pt			%xcc, __handle_user_windows_continue
+		 nop
+		/* do_notify_resume(0, pt_regs, %l5, %l6, thread flags);
+		 * %l6 is the in-syscall marker (see top-of-file comment).
+		 */
+		clr			%o0
+		mov			%l5, %o2
+		mov			%l6, %o3
+		add			%sp, PTREGS_OFF, %o1
+		mov			%l0, %o4
+
+		call			do_notify_resume
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		clr			%l6
+		/* Signal delivery can modify pt_regs tstate, so we must
+		 * reload it.
+		 */
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		sethi			%hi(0xf << 20), %l4
+		and			%l1, %l4, %l4
+		ba,pt			%xcc, __handle_user_windows_continue
+
+		 andn			%l1, %l4, %l1
+/* Slow path for _TIF_PERFCTR: call update_perfctrs() with interrupts
+ * enabled, then redo the user-window, reschedule, and signal checks
+ * since the IRQ-off atomicity was dropped for the C calls.
+ */
+__handle_perfctrs:
+		call			update_perfctrs
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		ldub			[%g6 + TI_WSAVED], %o2
+		brz,pt			%o2, 1f
+		 nop
+		/* Redo userwin+sched+sig checks */
+		call			fault_in_user_windows
+
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		ldx			[%g6 + TI_FLAGS], %l0
+		andcc			%l0, _TIF_NEED_RESCHED, %g0
+		be,pt			%xcc, 1f
+
+		 nop
+		call			schedule
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		ldx			[%g6 + TI_FLAGS], %l0
+1:		andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+
+		be,pt			%xcc, __handle_perfctrs_continue
+		 sethi			%hi(TSTATE_PEF), %o0
+		/* do_notify_resume(0, pt_regs, %l5, %l6, thread flags). */
+		clr			%o0
+		mov			%l5, %o2
+		mov			%l6, %o3
+		add			%sp, PTREGS_OFF, %o1
+		mov			%l0, %o4
+		call			do_notify_resume
+
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		clr			%l6
+		/* Signal delivery can modify pt_regs tstate, so we must
+		 * reload it.
+		 */
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		sethi			%hi(0xf << 20), %l4
+		and			%l1, %l4, %l4
+		andn			%l1, %l4, %l1
+		ba,pt			%xcc, __handle_perfctrs_continue
+
+		 sethi			%hi(TSTATE_PEF), %o0
+/* The saved tstate (%l1) has TSTATE_PEF set; if the FPU is actually
+ * disabled right now (FPRS_FEF clear in %fprs), clear TSTATE_PEF in
+ * %l1 before it is written back.  The annulling ",a" branch only
+ * executes the andn when the branch is taken.
+ */
+__handle_userfpu:
+		rd			%fprs, %l5
+		andcc			%l5, FPRS_FEF, %g0
+		sethi			%hi(TSTATE_PEF), %o0
+		be,a,pn			%icc, __handle_userfpu_continue
+		 andn			%l1, %o0, %l1
+		ba,a,pt			%xcc, __handle_userfpu_continue
+
+/* Pending signal / notify-resume work: call
+ * do_notify_resume(0, pt_regs, %l5, %l6, thread flags) with
+ * interrupts enabled, clear the in-syscall marker %l6, then reload
+ * tstate and re-extract its PIL field (bits 20-23, used later at
+ * rt_continue to restore %pil).
+ */
+__handle_signal:
+		clr			%o0
+		mov			%l5, %o2
+		mov			%l6, %o3
+		add			%sp, PTREGS_OFF, %o1
+		mov			%l0, %o4
+		call			do_notify_resume
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		clr			%l6
+
+		/* Signal delivery can modify pt_regs tstate, so we must
+		 * reload it.
+		 */
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		sethi			%hi(0xf << 20), %l4
+		and			%l1, %l4, %l4
+		ba,pt			%xcc, __handle_signal_continue
+		 andn			%l1, %l4, %l1
+
+		.align			64
+		.globl			rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
+/* Main return-from-trap path.  Checks per-cpu pending softirqs,
+ * then splits on TSTATE_PRIV: kernel returns go to to_kernel,
+ * user returns fall through to to_user and the pending-work checks.
+ * %l1 holds the saved tstate with the PIL field (0xf << 20) masked
+ * out; %l4 holds that PIL field; %l3 is nonzero for kernel returns.
+ */
+rtrap_irq:
+rtrap_clr_l6:	clr			%l6
+rtrap:
+		ldub			[%g6 + TI_CPU], %l0
+		sethi			%hi(irq_stat), %l2	! &softirq_active
+		or			%l2, %lo(irq_stat), %l2	! &softirq_active
+irqsz_patchme:	sllx			%l0, 0, %l0
+		lduw			[%l2 + %l0], %l1	! softirq_pending
+		cmp			%l1, 0
+
+		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
+		bne,pn			%icc, __handle_softirq
+		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+__handle_softirq_continue:
+rtrap_xcall:
+		sethi			%hi(0xf << 20), %l4
+		andcc			%l1, TSTATE_PRIV, %l3
+		and			%l1, %l4, %l4
+		bne,pn			%icc, to_kernel
+		 andn			%l1, %l4, %l1
+
+		/* We must hold IRQs off and atomically test schedule+signal
+		 * state, then hold them off all the way back to userspace.
+		 * If we are returning to kernel, none of this matters.
+		 *
+		 * If we do not do this, there is a window where we would do
+		 * the tests, later the signal/resched event arrives but we do
+		 * not process it since we are still in kernel mode.  It would
+		 * take until the next local IRQ before the signal/resched
+		 * event would be handled.
+		 *
+		 * This also means that if we have to deal with performance
+		 * counters or user windows, we have to redo all of these
+		 * sched+signal checks with IRQs disabled.
+		 */
+to_user:	wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		wrpr			0, %pil
+__handle_preemption_continue:
+		ldx			[%g6 + TI_FLAGS], %l0
+		sethi			%hi(_TIF_USER_WORK_MASK), %o0
+		or			%o0, %lo(_TIF_USER_WORK_MASK), %o0
+		andcc			%l0, %o0, %g0
+		sethi			%hi(TSTATE_PEF), %o0
+		be,pt			%xcc, user_nowork
+		 andcc			%l1, %o0, %g0
+		andcc			%l0, _TIF_NEED_RESCHED, %g0
+		bne,pn			%xcc, __handle_preemption
+		 andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+		bne,pn			%xcc, __handle_signal
+__handle_signal_continue:
+		 ldub			[%g6 + TI_WSAVED], %o2
+		brnz,pn			%o2, __handle_user_windows
+		 nop
+__handle_user_windows_continue:
+		ldx			[%g6 + TI_FLAGS], %l5
+		andcc			%l5, _TIF_PERFCTR, %g0
+		sethi			%hi(TSTATE_PEF), %o0
+		bne,pn			%xcc, __handle_perfctrs
+__handle_perfctrs_continue:
+		 andcc			%l1, %o0, %g0
+
+		/* This fpdepth clear is necessary for non-syscall rtraps only */
+user_nowork:
+		bne,pn			%xcc, __handle_userfpu
+		 stb			%g0, [%g6 + TI_FPDEPTH]
+__handle_userfpu_continue:
+
+/* Restore all user registers from the pt_regs frame and leave the
+ * trap via retry.  %l3 (TSTATE_PRIV) still distinguishes kernel
+ * returns, which branch to kern_rtt below.
+ */
+rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
+		ldx			[%sp + PTREGS_OFF + PT_V9_G2], %g2
+
+		ldx			[%sp + PTREGS_OFF + PT_V9_G3], %g3
+		ldx			[%sp + PTREGS_OFF + PT_V9_G4], %g4
+		ldx			[%sp + PTREGS_OFF + PT_V9_G5], %g5
+		mov			TSB_REG, %g6
+		brnz,a,pn		%l3, 1f
+		 ldxa			[%g6] ASI_IMMU, %g5
+1:		ldx			[%sp + PTREGS_OFF + PT_V9_G6], %g6
+		ldx			[%sp + PTREGS_OFF + PT_V9_G7], %g7
+		wrpr			%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
+		ldx			[%sp + PTREGS_OFF + PT_V9_I0], %i0
+		ldx			[%sp + PTREGS_OFF + PT_V9_I1], %i1
+
+		ldx			[%sp + PTREGS_OFF + PT_V9_I2], %i2
+		ldx			[%sp + PTREGS_OFF + PT_V9_I3], %i3
+		ldx			[%sp + PTREGS_OFF + PT_V9_I4], %i4
+		ldx			[%sp + PTREGS_OFF + PT_V9_I5], %i5
+		ldx			[%sp + PTREGS_OFF + PT_V9_I6], %i6
+		ldx			[%sp + PTREGS_OFF + PT_V9_I7], %i7
+		ldx			[%sp + PTREGS_OFF + PT_V9_TPC], %l2
+		ldx			[%sp + PTREGS_OFF + PT_V9_TNPC], %o2
+
+		ld			[%sp + PTREGS_OFF + PT_V9_Y], %o3
+		wr			%o3, %g0, %y
+		/* Restore the saved PIL (tstate bits 20-23, held in %l4). */
+		srl			%l4, 20, %l4
+		wrpr			%l4, 0x0, %pil
+		wrpr			%g0, 0x1, %tl
+		wrpr			%l1, %g0, %tstate
+		wrpr			%l2, %g0, %tpc
+		wrpr			%o2, %g0, %tnpc
+
+		brnz,pn			%l3, kern_rtt
+		 mov			PRIMARY_CONTEXT, %l7
+		ldxa			[%l7 + %l7] ASI_DMMU, %l0
+/* This sethi is rewritten at boot by cheetah_plus_patch_rtrap (below)
+ * on Cheetah+ CPUs.
+ */
+cplus_rtrap_insn_1:
+		sethi			%hi(0), %l1
+		sllx			%l1, 32, %l1
+		or			%l0, %l1, %l0
+		stxa			%l0, [%l7] ASI_DMMU
+		flush			%g6
+		rdpr			%wstate, %l1
+		rdpr			%otherwin, %l2
+		srl			%l1, 3, %l1
+
+		wrpr			%l2, %g0, %canrestore
+		wrpr			%l1, %g0, %wstate
+		wrpr			%g0, %g0, %otherwin
+		restore
+		rdpr			%canrestore, %g1
+		wrpr			%g1, 0x0, %cleanwin
+		retry
+		nop
+
+/* Kernel return: just pop the register window and retry the trap. */
+kern_rtt:	restore
+		retry
+/* Return to kernel mode.  With CONFIG_PREEMPT, if preemption is
+ * allowed (TI_PRE_COUNT zero), a reschedule is pending, and the
+ * saved PIL is zero, set PREEMPT_ACTIVE, call schedule(), and redo
+ * the whole rtrap path.  Then fall into kern_fpucheck, which pops
+ * one level of saved FPU state (TI_FPDEPTH) back into the FP
+ * registers before continuing at rt_continue.
+ */
+to_kernel:
+#ifdef CONFIG_PREEMPT
+		ldsw			[%g6 + TI_PRE_COUNT], %l5
+		brnz			%l5, kern_fpucheck
+		 ldx			[%g6 + TI_FLAGS], %l5
+		andcc			%l5, _TIF_NEED_RESCHED, %g0
+		be,pt			%xcc, kern_fpucheck
+		 srl			%l4, 20, %l5
+		cmp			%l5, 0
+		bne,pn			%xcc, kern_fpucheck
+		 sethi			%hi(PREEMPT_ACTIVE), %l6
+		stw			%l6, [%g6 + TI_PRE_COUNT]
+		call			schedule
+		 nop
+		ba,pt			%xcc, rtrap
+		 stw			%g0, [%g6 + TI_PRE_COUNT]
+#endif
+kern_fpucheck:	ldub			[%g6 + TI_FPDEPTH], %l5
+		brz,pt			%l5, rt_continue
+		 srl			%l5, 1, %o0
+		add			%g6, TI_FPSAVED, %l6
+		ldub			[%l6 + %o0], %l2
+		sub			%l5, 2, %l5
+
+		/* %l2 holds the saved fprs bits for this depth level;
+		 * reload only the halves (FPRS_DL lower, FPRS_DU upper)
+		 * that were actually saved.
+		 */
+		add			%g6, TI_GSR, %o1
+		andcc			%l2, (FPRS_FEF|FPRS_DU), %g0
+		be,pt			%icc, 2f
+		 and			%l2, FPRS_DL, %l6
+		andcc			%l2, FPRS_FEF, %g0
+		be,pn			%icc, 5f
+		 sll			%o0, 3, %o5
+		rd			%fprs, %g1
+
+		wr			%g1, FPRS_FEF, %fprs
+		ldx			[%o1 + %o5], %g1
+		add			%g6, TI_XFSR, %o1
+		membar			#StoreLoad | #LoadLoad
+		sll			%o0, 8, %o2
+		add			%g6, TI_FPREGS, %o3
+		brz,pn			%l6, 1f
+		 add			%g6, TI_FPREGS+0x40, %o4
+
+		ldda			[%o3 + %o2] ASI_BLK_P, %f0
+		ldda			[%o4 + %o2] ASI_BLK_P, %f16
+1:		andcc			%l2, FPRS_DU, %g0
+		be,pn			%icc, 1f
+		 wr			%g1, 0, %gsr
+		add			%o2, 0x80, %o2
+		ldda			[%o3 + %o2] ASI_BLK_P, %f32
+		ldda			[%o4 + %o2] ASI_BLK_P, %f48
+
+1:		membar			#Sync
+		ldx			[%o1 + %o5], %fsr
+2:		stb			%l5, [%g6 + TI_FPDEPTH]
+		ba,pt			%xcc, rt_continue
+		 nop
+/* FPRS_FEF was clear but FPRS_DU set: reload only the upper half. */
+5:		wr			%g0, FPRS_FEF, %fprs
+		membar			#StoreLoad | #LoadLoad
+		sll			%o0, 8, %o2
+
+		add			%g6, TI_FPREGS+0x80, %o3
+		add			%g6, TI_FPREGS+0xc0, %o4
+		ldda			[%o3 + %o2] ASI_BLK_P, %f32
+		ldda			[%o4 + %o2] ASI_BLK_P, %f48
+		membar			#Sync
+		wr			%g0, FPRS_DU, %fprs
+		ba,pt			%xcc, rt_continue
+		 stb			%l5, [%g6 + TI_FPDEPTH]
+
+/* Replacement instruction used on Cheetah+ CPUs (loads the nucleus
+ * context value instead of sethi %hi(0)).
+ */
+cplus_rinsn_1:
+		sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %l1
+
+		.globl			cheetah_plus_patch_rtrap
+/* Boot-time patch routine: copy the 4-byte instruction at
+ * cplus_rinsn_1 over cplus_rtrap_insn_1 in the rtrap path above,
+ * then flush the patched address out of the I-cache.
+ */
+cheetah_plus_patch_rtrap:
+		/* We configure the dTLB512_0 for 4MB pages and the
+		 * dTLB512_1 for 8K pages when in context zero.
+		 */
+		sethi			%hi(cplus_rinsn_1), %o0
+		sethi			%hi(cplus_rtrap_insn_1), %o2
+		lduw			[%o0 + %lo(cplus_rinsn_1)], %o1
+		or			%o2, %lo(cplus_rtrap_insn_1), %o2
+		stw			%o1, [%o2]
+		flush			%o2
+
+		retl
+		 nop
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
new file mode 100644
index 0000000..14d9c3a
--- /dev/null
+++ b/arch/sparc64/kernel/sbus.c
@@ -0,0 +1,1243 @@
+/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
+ * sbus.c: UltraSparc SBUS controller support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/sbus.h>
+#include <asm/io.h>
+#include <asm/upa.h>
+#include <asm/cache.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/starfire.h>
+
+#include "iommu_common.h"
+
+/* These should be allocated on an SMP_CACHE_BYTES
+ * aligned boundary for optimal performance.
+ *
+ * On SYSIO, using an 8K page size we have 1GB of SBUS
+ * DMA space mapped.  We divide this space into equally
+ * sized clusters. We allocate a DMA mapping from the
+ * cluster that matches the order of the allocation, or
+ * if the order is greater than the number of clusters,
+ * we try to allocate from the last cluster.
+ */
+
+#define NCLUSTERS	8UL
+#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
+#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
+#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
+#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)
+#define MAP_BASE	((u32)0xc0000000)
+
+struct sbus_iommu {
+/*0x00*/spinlock_t		lock;
+
+/*0x08*/iopte_t			*page_table;
+/*0x10*/unsigned long		strbuf_regs;
+/*0x18*/unsigned long		iommu_regs;
+/*0x20*/unsigned long		sbus_control_reg;
+
+/*0x28*/volatile unsigned long	strbuf_flushflag;
+
+	/* If NCLUSTERS is ever decresed to 4 or lower,
+	 * you must increase the size of the type of
+	 * these counters.  You have been duly warned. -DaveM
+	 */
+/*0x30*/struct {
+		u16	next;
+		u16	flush;
+	} alloc_info[NCLUSTERS];
+
+	/* The lowest used consistent mapping entry.  Since
+	 * we allocate consistent maps out of cluster 0 this
+	 * is relative to the beginning of closter 0.
+	 */
+/*0x50*/u32		lowest_consistent_map;
+};
+
+/* Offsets from iommu_regs */
+#define SYSIO_IOMMUREG_BASE	0x2400UL
+#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
+#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
+#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
+#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
+#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
+#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
+#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
+#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */
+
+#define IOMMU_DRAM_VALID	(1UL << 30UL)
+
+/* Invalidate the entire SYSIO IOMMU TLB by zeroing all 16 tag
+ * diagnostic slots, then reading the SBUS control register to force
+ * the posted writes to complete.  Since everything is now flushed,
+ * each cluster's flush marker is caught up to its next pointer.
+ * NOTE(review): callers appear to hold iommu->lock — confirm.
+ */
+static void __iommu_flushall(struct sbus_iommu *iommu)
+{
+	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+	int entry;
+
+	/* One 8-byte tag diagnostic register per TLB entry. */
+	for (entry = 0; entry < 16; entry++) {
+		upa_writeq(0, tag);
+		tag += 8UL;
+	}
+	upa_readq(iommu->sbus_control_reg);
+
+	for (entry = 0; entry < NCLUSTERS; entry++) {
+		iommu->alloc_info[entry].flush =
+			iommu->alloc_info[entry].next;
+	}
+}
+
+/* Flush the IOMMU TLB entries for npages IO pages starting at DMA
+ * address base, one write to the flush register per page, then read
+ * the control register so the writes are not left posted.
+ */
+static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	while (npages--)
+		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+			   iommu->iommu_regs + IOMMU_FLUSH);
+	upa_readq(iommu->sbus_control_reg);
+}
+
+/* Offsets from strbuf_regs */
+#define SYSIO_STRBUFREG_BASE	0x2800UL
+#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
+#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
+#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
+#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
+#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
+#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
+#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */
+
+#define STRBUF_TAG_VALID	0x02UL
+
+/* Synchronously flush the streaming buffer for npages IO pages at
+ * DMA address base: post one page-flush per page, ask the hardware
+ * to write a completion word to strbuf_flushflag via the flush-sync
+ * register, then spin until that word becomes nonzero.
+ */
+static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	iommu->strbuf_flushflag = 0UL;
+	while (npages--)
+		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+			   iommu->strbuf_regs + STRBUF_PFLUSH);
+
+	/* Whoopee cushion! */
+	/* Hardware writes to this physical address once the posted
+	 * page flushes above have all completed.
+	 */
+	upa_writeq(__pa(&iommu->strbuf_flushflag),
+		   iommu->strbuf_regs + STRBUF_FSYNC);
+	upa_readq(iommu->sbus_control_reg);
+	while (iommu->strbuf_flushflag == 0UL)
+		membar("#LoadLoad");
+}
+
+/* Allocate IOPTEs for a streaming mapping of npages IO pages.
+ * The page table is divided into NCLUSTERS clusters; an allocation
+ * of order cnum comes from cluster cnum, stepping through it in
+ * strides of (1 << cnum) entries.  Requests larger than the biggest
+ * cluster order take nent consecutive strides from the last cluster.
+ * Returns a pointer to the first IOPTE, or NULL on exhaustion.
+ * NOTE(review): callers appear to hold iommu->lock — confirm.
+ */
+static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte, *limit, *first, *cluster;
+	unsigned long cnum, ent, nent, flush_point, found;
+
+	/* cnum = smallest order covering npages. */
+	cnum = 0;
+	nent = 1;
+	while ((1UL << cnum) < npages)
+		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
+	iopte  = iommu->page_table + (cnum * CLUSTER_NPAGES);
+
+	/* Cluster 0 also hosts consistent mappings (allocated from its
+	 * top downward), so stop the search at the lowest one in use.
+	 */
+	if (cnum == 0)
+		limit = (iommu->page_table +
+			 iommu->lowest_consistent_map);
+	else
+		limit = (iopte + CLUSTER_NPAGES);
+
+	/* Resume the circular scan where the last allocation stopped. */
+	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
+	flush_point = iommu->alloc_info[cnum].flush;
+
+	first = iopte;
+	cluster = NULL;
+	found = 0;
+	for (;;) {
+		if (iopte_val(*iopte) == 0UL) {
+			found++;
+			if (!cluster)
+				cluster = iopte;
+		} else {
+			/* Used cluster in the way */
+			cluster = NULL;
+			found = 0;
+		}
+
+		if (found == nent)
+			break;
+
+		iopte += (1 << cnum);
+		ent++;
+		if (iopte >= limit) {
+			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
+			ent = 0;
+
+			/* Multiple cluster allocations must not wrap */
+			cluster = NULL;
+			found = 0;
+		}
+		/* Crossing the flush point means we are about to reuse
+		 * entries the hardware TLB may still hold.
+		 */
+		if (ent == flush_point)
+			__iommu_flushall(iommu);
+		if (iopte == first)
+			goto bad;
+	}
+
+	/* ent/iopte points to the last cluster entry we're going to use,
+	 * so save our place for the next allocation.
+	 */
+	if ((iopte + (1 << cnum)) >= limit)
+		ent = 0;
+	else
+		ent = ent + 1;
+	iommu->alloc_info[cnum].next = ent;
+	if (ent == flush_point)
+		__iommu_flushall(iommu);
+
+	/* I've got your streaming cluster right here buddy boy... */
+	return cluster;
+
+bad:
+	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
+	       npages);
+	return NULL;
+}
+
+/* Release the IOPTEs of a streaming mapping previously handed out by
+ * alloc_streaming_cluster() for DMA address base / npages pages.
+ * Recomputes the cluster order exactly as the allocator did, zeroes
+ * the entries, and pulls the cluster's flush point back if needed.
+ */
+static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	unsigned long cnum, ent, nent;
+	iopte_t *iopte;
+
+	/* Mirror the order computation from alloc_streaming_cluster(). */
+	cnum = 0;
+	nent = 1;
+	while ((1UL << cnum) < npages)
+		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
+	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
+	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
+	do {
+		iopte_val(*iopte) = 0UL;
+		iopte += 1 << cnum;
+	} while(--nent);
+
+	/* If the global flush might not have caught this entry,
+	 * adjust the flush point such that we will flush before
+	 * ever trying to reuse it.
+	 */
+#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
+	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
+		iommu->alloc_info[cnum].flush = ent;
+#undef between
+}
+
+/* We allocate consistent mappings from the end of cluster zero. */
+static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte;
+
+	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
+	while (iopte > iommu->page_table) {
+		iopte--;
+		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
+			unsigned long tmp = npages;
+
+			while (--tmp) {
+				iopte--;
+				if (iopte_val(*iopte) & IOPTE_VALID)
+					break;
+			}
+			if (tmp == 0) {
+				u32 entry = (iopte - iommu->page_table);
+
+				if (entry < iommu->lowest_consistent_map)
+					iommu->lowest_consistent_map = entry;
+				return iopte;
+			}
+		}
+	}
+	return NULL;
+}
+
+/* Free a consistent mapping of npages IO pages at DMA address base.
+ * If this mapping was the lowest one in cluster 0, walk upward past
+ * it (and any already-free entries) to raise lowest_consistent_map,
+ * giving room back to the streaming allocator.  Then zero the IOPTEs.
+ */
+static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
+
+	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
+		iopte_t *walk = iopte + npages;
+		iopte_t *limit;
+
+		limit = iommu->page_table + CLUSTER_NPAGES;
+		while (walk < limit) {
+			if (iopte_val(*walk) != 0UL)
+				break;
+			walk++;
+		}
+		iommu->lowest_consistent_map =
+			(walk - iommu->page_table);
+	}
+
+	while (npages--)
+		*iopte++ = __iopte(0UL);
+}
+
+/* Allocate a consistent (coherent) DMA buffer of at least size bytes
+ * for SBUS device sdev.  Returns the zeroed kernel virtual address
+ * and stores the corresponding DMA address in *dvma_addr; returns
+ * NULL on bad arguments, over-large requests (order >= 10), or
+ * page/IOPTE exhaustion.
+ * NOTE(review): size is size_t, so "size <= 0" only rejects 0 —
+ * presumably intentional, but confirm.
+ */
+void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
+{
+	unsigned long order, first_page, flags;
+	struct sbus_iommu *iommu;
+	iopte_t *iopte;
+	void *ret;
+	int npages;
+
+	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
+		return NULL;
+
+	size = IO_PAGE_ALIGN(size);
+	order = get_order(size);
+	if (order >= 10)
+		return NULL;
+	first_page = __get_free_pages(GFP_KERNEL, order);
+	if (first_page == 0UL)
+		return NULL;
+	memset((char *)first_page, 0, PAGE_SIZE << order);
+
+	iommu = sdev->bus->iommu;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
+	if (iopte == NULL) {
+		spin_unlock_irqrestore(&iommu->lock, flags);
+		free_pages(first_page, order);
+		return NULL;
+	}
+
+	/* Ok, we're committed at this point. */
+	*dvma_addr = MAP_BASE +	((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+	ret = (void *) first_page;
+	npages = size >> IO_PAGE_SHIFT;
+	/* Point each IOPTE at its physical page; WRITE so the device
+	 * can DMA into the buffer, CACHE per the IOPTE flags in use.
+	 */
+	while (npages--) {
+		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
+				   (__pa(first_page) & IOPTE_PAGE));
+		first_page += IO_PAGE_SIZE;
+	}
+	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
+}
+
+/* Free a consistent DMA buffer previously returned by
+ * sbus_alloc_consistent(): release and flush its IOPTEs under the
+ * IOMMU lock, then return the pages.  The "order < 10" check mirrors
+ * the allocator's rejection of order >= 10 requests.
+ */
+void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
+{
+	unsigned long order, npages;
+	struct sbus_iommu *iommu;
+
+	if (size <= 0 || sdev == NULL || cpu == NULL)
+		return;
+
+	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
+	iommu = sdev->bus->iommu;
+
+	spin_lock_irq(&iommu->lock);
+	free_consistent_cluster(iommu, dvma, npages);
+	iommu_flush(iommu, dvma, npages);
+	spin_unlock_irq(&iommu->lock);
+
+	order = get_order(size);
+	if (order < 10)
+		free_pages((unsigned long)cpu, order);
+}
+
+/* Map a single kernel buffer for streaming DMA by device sdev.
+ * Rounds [ptr, ptr+size) out to whole IO pages, allocates a run of
+ * streaming IOPTEs, and returns the DMA address with the sub-page
+ * offset folded back in.  dir must not be SBUS_DMA_NONE; the write
+ * bit is set unless the transfer is to-device only.  BUGs on IOPTE
+ * exhaustion rather than returning an error.
+ */
+dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long npages, pbase, flags;
+	iopte_t *iopte;
+	u32 dma_base, offset;
+	unsigned long iopte_bits;
+
+	if (dir == SBUS_DMA_NONE)
+		BUG();
+
+	/* Split ptr into an IO-page-aligned physical base and the
+	 * offset within the first page.
+	 */
+	pbase = (unsigned long) ptr;
+	offset = (u32) (pbase & ~IO_PAGE_MASK);
+	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
+	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	npages = size >> IO_PAGE_SHIFT;
+	iopte = alloc_streaming_cluster(iommu, npages);
+	if (iopte == NULL)
+		goto bad;
+	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+	npages = size >> IO_PAGE_SHIFT;
+	/* STBUF marks the pages as going through the streaming buffer. */
+	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+	if (dir != SBUS_DMA_TODEVICE)
+		iopte_bits |= IOPTE_WRITE;
+	while (npages--) {
+		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
+		pbase += IO_PAGE_SIZE;
+	}
+	npages = size >> IO_PAGE_SHIFT;
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return (dma_base | offset);
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	BUG();
+	return 0;
+}
+
+/* Tear down a streaming DMA mapping made by sbus_map_single():
+ * release the IOPTEs and synchronously flush the streaming buffer
+ * for the covered pages, all under the IOMMU lock.
+ */
+void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	u32 base = dma_addr & IO_PAGE_MASK;
+	unsigned long npages, lock_flags;
+
+	/* Round the region out to whole IO pages. */
+	npages = (IO_PAGE_ALIGN(dma_addr + size) - base) >> IO_PAGE_SHIFT;
+
+	spin_lock_irqsave(&iommu->lock, lock_flags);
+	free_streaming_cluster(iommu, base, npages);
+	strbuf_flush(iommu, base, npages);
+	spin_unlock_irqrestore(&iommu->lock, lock_flags);
+}
+
+/* Physical address of a scatterlist entry's data. */
+#define SG_ENT_PHYS_ADDRESS(SG)	\
+	(__pa(page_address((SG)->page)) + (SG)->offset)
+
+/* Fill IOMMU ptes for a scatterlist that prepare_sg() has already
+ * condensed into 'nused' composite DVMA entries.  'sg'/'nelems' is the
+ * raw list, 'iopte_bits' the protection bits OR'd into every pte.
+ * Caller holds iommu->lock.
+ */
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
+{
+	struct scatterlist *dma_sg = sg;
+	struct scatterlist *sg_end = sg + nelems;
+	int i;
+
+	for (i = 0; i < nused; i++) {
+		/* ~0UL means "no pte in progress"; its page bits can
+		 * never match a real physical address below.
+		 */
+		unsigned long pteval = ~0UL;
+		u32 dma_npages;
+
+		/* Number of IO pages this composite DVMA entry spans. */
+		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+			      dma_sg->dma_length +
+			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+		do {
+			unsigned long offset;
+			signed int len;
+
+			/* If we are here, we know we have at least one
+			 * more page to map.  So walk forward until we
+			 * hit a page crossing, and begin creating new
+			 * mappings from that spot.
+			 */
+			for (;;) {
+				unsigned long tmp;
+
+				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
+				len = sg->length;
+				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+					/* Entry starts in a new IO page. */
+					pteval = tmp & IO_PAGE_MASK;
+					offset = tmp & (IO_PAGE_SIZE - 1UL);
+					break;
+				}
+				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+					/* Entry crosses into the next IO page
+					 * part-way through; map from there.
+					 */
+					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+					offset = 0UL;
+					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+					break;
+				}
+				sg++;
+			}
+
+			/* Emit ptes for every IO page this entry touches. */
+			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
+			while (len > 0) {
+				*iopte++ = __iopte(pteval);
+				pteval += IO_PAGE_SIZE;
+				len -= (IO_PAGE_SIZE - offset);
+				offset = 0;
+				dma_npages--;
+			}
+
+			pteval = (pteval & IOPTE_PAGE) + len;
+			sg++;
+
+			/* Skip over any tail mappings we've fully mapped,
+			 * adjusting pteval along the way.  Stop when we
+			 * detect a page crossing event.
+			 */
+			while (sg < sg_end &&
+			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+			       ((pteval ^
+				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+				pteval += sg->length;
+				sg++;
+			}
+			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+				pteval = ~0UL;
+		} while (dma_npages != 0);
+		dma_sg++;
+	}
+}
+
+/* Map a scatterlist for streaming DVMA.  Returns the number of
+ * composite DVMA entries actually used (<= nents); callers use
+ * sg[i].dma_address/dma_length for the result.  A failed cluster
+ * allocation is fatal (BUG).
+ */
+int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long flags, npages;
+	iopte_t *iopte;
+	u32 dma_base;
+	struct scatterlist *sgtmp;
+	int used;
+	unsigned long iopte_bits;
+
+	if (dir == SBUS_DMA_NONE)
+		BUG();
+
+	/* Fast path single entry scatterlists. */
+	if (nents == 1) {
+		sg->dma_address =
+			sbus_map_single(sdev,
+					(page_address(sg->page) + sg->offset),
+					sg->length, dir);
+		sg->dma_length = sg->length;
+		return 1;
+	}
+
+	/* prepare_sg() condenses the list into composite entries with
+	 * cluster-relative dma_address values and returns the total
+	 * number of IO pages required.
+	 */
+	npages = prepare_sg(sg, nents);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	iopte = alloc_streaming_cluster(iommu, npages);
+	if (iopte == NULL)
+		goto bad;
+	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+
+	/* Normalize DVMA addresses. */
+	sgtmp = sg;
+	used = nents;
+
+	while (used && sgtmp->dma_length) {
+		sgtmp->dma_address += dma_base;
+		sgtmp++;
+		used--;
+	}
+	/* 'used' is now the count of composite DVMA entries produced. */
+	used = nents - used;
+
+	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+	if (dir != SBUS_DMA_TODEVICE)
+		iopte_bits |= IOPTE_WRITE;
+
+	fill_sg(iopte, sg, used, nents, iopte_bits);
+#ifdef VERIFY_SG
+	verify_sglist(sg, nents, iopte, npages);
+#endif
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return used;
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	BUG();
+	return 0;
+}
+
+/* Unmap a scatterlist mapped by sbus_map_sg(): locate the contiguous
+ * DVMA range spanned by the used entries, then free the cluster and
+ * flush the streaming buffer for it.
+ */
+void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+	unsigned long size, flags;
+	struct sbus_iommu *iommu;
+	u32 dvma_base;
+	int i;
+
+	/* Fast path single entry scatterlists. */
+	if (nents == 1) {
+		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
+		return;
+	}
+
+	/* Walk to the first unused entry (dma_length == 0); the one
+	 * before it is the last used entry, which bounds the range.
+	 */
+	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
+	for (i = 0; i < nents; i++) {
+		if (sg[i].dma_length == 0)
+			break;
+	}
+	i--;
+	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
+
+	iommu = sdev->bus->iommu;
+	spin_lock_irqsave(&iommu->lock, flags);
+	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make device writes to a single streaming mapping visible to the
+ * CPU by flushing the streaming buffer over the mapped range.
+ */
+void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	u32 page_base = base & IO_PAGE_MASK;
+	unsigned long npages, flags;
+
+	/* Round the range out to whole IO pages. */
+	npages = (IO_PAGE_ALIGN(base + size) - page_base) >> IO_PAGE_SHIFT;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	strbuf_flush(iommu, page_base, npages);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Intentionally empty: no work is done for the CPU-to-device sync
+ * direction here.  NOTE(review): presumably the hardware needs no
+ * explicit flush before device access — confirm against the SYSIO
+ * streaming-buffer documentation.
+ */
+void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
+{
+}
+
+/* Scatterlist variant of sbus_dma_sync_single_for_cpu(): flush the
+ * streaming buffer over the contiguous DVMA range covered by the
+ * used entries of the list.
+ */
+void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long flags, size;
+	u32 base;
+	int i;
+
+	/* Find the last used entry (dma_length != 0) to bound the range. */
+	base = sg[0].dma_address & IO_PAGE_MASK;
+	for (i = 0; i < nents; i++) {
+		if (sg[i].dma_length == 0)
+			break;
+	}
+	i--;
+	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Intentionally empty, mirroring sbus_dma_sync_single_for_device():
+ * no flush is performed for the CPU-to-device direction.
+ */
+void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+}
+
+/* Enable 64-bit DVMA mode for the given device. */
+void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	int slot = sdev->slot;
+	unsigned long cfg_reg;
+	u64 val;
+
+	/* Per-slot config registers: slots 0-3 live at offsets
+	 * 0x20/0x28/0x30/0x38, slots 13-15 at 0x40/0x48/0x50.
+	 * All other slot numbers have no config register.
+	 */
+	if (slot >= 0 && slot <= 3)
+		cfg_reg = iommu->sbus_control_reg +
+			0x20UL + ((unsigned long) slot * 0x8UL);
+	else if (slot >= 13 && slot <= 15)
+		cfg_reg = iommu->sbus_control_reg +
+			0x40UL + ((unsigned long) (slot - 13) * 0x8UL);
+	else
+		return;
+
+	val = upa_readq(cfg_reg);
+	if (val & (1UL << 14UL)) {
+		/* Extended transfer mode already enabled. */
+		return;
+	}
+
+	/* Turn on extended transfer mode plus the requested bursts. */
+	val |= (1UL << 14UL);
+
+	if (bursts & DMA_BURST8)
+		val |= (1UL << 1UL);
+	if (bursts & DMA_BURST16)
+		val |= (1UL << 2UL);
+	if (bursts & DMA_BURST32)
+		val |= (1UL << 3UL);
+	if (bursts & DMA_BURST64)
+		val |= (1UL << 4UL);
+	upa_writeq(val, cfg_reg);
+}
+
+/* SBUS SYSIO INO number to Sparc PIL level. */
+/* Indexed by INO; a value of 0 marks an INO with no valid PIL
+ * (sbus_build_irq() panics on those).
+ */
+static unsigned char sysio_ino_to_pil[] = {
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
+	4, /* Onboard SCSI */
+	5, /* Onboard Ethernet */
+/*XXX*/	8, /* Onboard BPP */
+	0, /* Bogon */
+       13, /* Audio */
+/*XXX*/15, /* PowerFail */
+	0, /* Bogon */
+	0, /* Bogon */
+       12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
+       11, /* Floppy */
+	0, /* Spare Hardware (bogon for now) */
+	0, /* Keyboard (bogon for now) */
+	0, /* Mouse (bogon for now) */
+	0, /* Serial (bogon for now) */
+     0, 0, /* Bogon, Bogon */
+       10, /* Timer 0 */
+       11, /* Timer 1 */
+     0, 0, /* Bogon, Bogon */
+       15, /* Uncorrectable SBUS Error */
+       15, /* Correctable SBUS Error */
+       15, /* SBUS Error */
+/*XXX*/ 0, /* Power Management (bogon for now) */
+};
+
+/* INO number to IMAP register offset for SYSIO external IRQ's.
+ * This should conform to both Sunfire/Wildfire server and Fusion
+ * desktop designs.
+ */
+#define SYSIO_IMAP_SLOT0	0x2c04UL
+#define SYSIO_IMAP_SLOT1	0x2c0cUL
+#define SYSIO_IMAP_SLOT2	0x2c14UL
+#define SYSIO_IMAP_SLOT3	0x2c1cUL
+#define SYSIO_IMAP_SCSI		0x3004UL
+#define SYSIO_IMAP_ETH		0x300cUL
+#define SYSIO_IMAP_BPP		0x3014UL
+#define SYSIO_IMAP_AUDIO	0x301cUL
+#define SYSIO_IMAP_PFAIL	0x3024UL
+#define SYSIO_IMAP_KMS		0x302cUL
+#define SYSIO_IMAP_FLPY		0x3034UL
+#define SYSIO_IMAP_SHW		0x303cUL
+#define SYSIO_IMAP_KBD		0x3044UL
+#define SYSIO_IMAP_MS		0x304cUL
+#define SYSIO_IMAP_SER		0x3054UL
+#define SYSIO_IMAP_TIM0		0x3064UL
+#define SYSIO_IMAP_TIM1		0x306cUL
+#define SYSIO_IMAP_UE		0x3074UL
+#define SYSIO_IMAP_CE		0x307cUL
+#define SYSIO_IMAP_SBERR	0x3084UL
+#define SYSIO_IMAP_PMGMT	0x308cUL
+#define SYSIO_IMAP_GFX		0x3094UL
+#define SYSIO_IMAP_EUPA		0x309cUL
+
+#define bogon     ((unsigned long) -1)
+/* Indexed by INO; 'bogon' marks INOs with no IMAP register
+ * (sbus_build_irq() halts on those).
+ */
+static unsigned long sysio_irq_offsets[] = {
+	/* SBUS Slot 0 --> 3, level 1 --> 7 */
+	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+
+	/* Onboard devices (not relevant/used on SunFire). */
+	SYSIO_IMAP_SCSI,
+	SYSIO_IMAP_ETH,
+	SYSIO_IMAP_BPP,
+	bogon,
+	SYSIO_IMAP_AUDIO,
+	SYSIO_IMAP_PFAIL,
+	bogon,
+	bogon,
+	SYSIO_IMAP_KMS,
+	SYSIO_IMAP_FLPY,
+	SYSIO_IMAP_SHW,
+	SYSIO_IMAP_KBD,
+	SYSIO_IMAP_MS,
+	SYSIO_IMAP_SER,
+	bogon,
+	bogon,
+	SYSIO_IMAP_TIM0,
+	SYSIO_IMAP_TIM1,
+	bogon,
+	bogon,
+	SYSIO_IMAP_UE,
+	SYSIO_IMAP_CE,
+	SYSIO_IMAP_SBERR,
+	SYSIO_IMAP_PMGMT,
+};
+
+#undef bogon
+
+/* Element count of sysio_irq_offsets[]. */
+#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
+
+/* Convert Interrupt Mapping register pointer to associated
+ * Interrupt Clear register pointer, SYSIO specific version.
+ */
+#define SYSIO_ICLR_UNUSED0	0x3400UL
+#define SYSIO_ICLR_SLOT0	0x340cUL
+#define SYSIO_ICLR_SLOT1	0x344cUL
+#define SYSIO_ICLR_SLOT2	0x348cUL
+#define SYSIO_ICLR_SLOT3	0x34ccUL
+static unsigned long sysio_imap_to_iclr(unsigned long imap)
+{
+	/* Each ICLR register sits at a fixed displacement above its
+	 * IMAP counterpart, so a single add does the conversion.
+	 */
+	return imap + (SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0);
+}
+
+/* Translate a SYSIO INO into a Linux IRQ via build_irq().  Looks up
+ * the PIL and IMAP offset tables, then derives the matching ICLR
+ * register.  Invalid INOs panic/halt.
+ */
+unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
+{
+	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long imap, iclr;
+	int pil, sbus_level = 0;
+
+	pil = sysio_ino_to_pil[ino];
+	if (!pil) {
+		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
+		panic("Bad SYSIO IRQ translations...");
+	}
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	imap = sysio_irq_offsets[ino];
+	if (imap == ((unsigned long)-1)) {
+		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
+			    ino, pil);
+		prom_halt();
+	}
+	imap += reg_base;
+
+	/* SYSIO inconsistency.  For external SLOTS, we have to select
+	 * the right ICLR register based upon the lower SBUS irq level
+	 * bits.
+	 */
+	if (ino >= 0x20) {
+		iclr = sysio_imap_to_iclr(imap);
+	} else {
+		/* INO encodes slot in bits 3-4 and SBUS level in bits 0-2. */
+		int sbus_slot = (ino & 0x18)>>3;
+		
+		sbus_level = ino & 0x7;
+
+		switch(sbus_slot) {
+		case 0:
+			iclr = reg_base + SYSIO_ICLR_SLOT0;
+			break;
+		case 1:
+			iclr = reg_base + SYSIO_ICLR_SLOT1;
+			break;
+		case 2:
+			iclr = reg_base + SYSIO_ICLR_SLOT2;
+			break;
+		default:
+		case 3:
+			iclr = reg_base + SYSIO_ICLR_SLOT3;
+			break;
+		};
+
+		/* Each slot has one ICLR per SBUS level, 8 bytes apart. */
+		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
+	}
+	return build_irq(pil, sbus_level, iclr, imap);
+}
+
+/* Error interrupt handling. */
+#define SYSIO_UE_AFSR	0x0030UL
+#define SYSIO_UE_AFAR	0x0038UL
+#define  SYSIO_UEAFSR_PPIO  0x8000000000000000UL /* Primary PIO cause         */
+#define  SYSIO_UEAFSR_PDRD  0x4000000000000000UL /* Primary DVMA read cause   */
+#define  SYSIO_UEAFSR_PDWR  0x2000000000000000UL /* Primary DVMA write cause  */
+#define  SYSIO_UEAFSR_SPIO  0x1000000000000000UL /* Secondary PIO is cause    */
+#define  SYSIO_UEAFSR_SDRD  0x0800000000000000UL /* Secondary DVMA read cause */
+#define  SYSIO_UEAFSR_SDWR  0x0400000000000000UL /* Secondary DVMA write cause*/
+#define  SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved                  */
+#define  SYSIO_UEAFSR_DOFF  0x0000e00000000000UL /* Doubleword Offset         */
+#define  SYSIO_UEAFSR_SIZE  0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
+#define  SYSIO_UEAFSR_MID   0x000003e000000000UL /* UPA MID causing the fault */
+#define  SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved                  */
+/* Uncorrectable-ECC-error interrupt: latch AFSR/AFAR, write the
+ * primary/secondary cause bits back to clear them, and log a decoded
+ * report.  Always returns IRQ_HANDLED.
+ */
+static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	afsr_reg = reg_base + SYSIO_UE_AFSR;
+	afar_reg = reg_base + SYSIO_UE_AFAR;
+
+	/* Latch error status. */
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
+		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
+	upa_writeq(error_bits, afsr_reg);
+
+	/* Log the error. */
+	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
+		  "DVMA Read" :
+		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
+		   "DVMA Write" : "???")))));
+	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
+	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SYSIO_UEAFSR_SDRD) {
+		reported++;
+		printk("(DVMA Read)");
+	}
+	if (afsr & SYSIO_UEAFSR_SDWR) {
+		reported++;
+		printk("(DVMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+#define SYSIO_CE_AFSR	0x0040UL
+#define SYSIO_CE_AFAR	0x0048UL
+#define  SYSIO_CEAFSR_PPIO  0x8000000000000000UL /* Primary PIO cause         */
+#define  SYSIO_CEAFSR_PDRD  0x4000000000000000UL /* Primary DVMA read cause   */
+#define  SYSIO_CEAFSR_PDWR  0x2000000000000000UL /* Primary DVMA write cause  */
+#define  SYSIO_CEAFSR_SPIO  0x1000000000000000UL /* Secondary PIO cause       */
+#define  SYSIO_CEAFSR_SDRD  0x0800000000000000UL /* Secondary DVMA read cause */
+#define  SYSIO_CEAFSR_SDWR  0x0400000000000000UL /* Secondary DVMA write cause*/
+#define  SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved                  */
+#define  SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits             */
+#define  SYSIO_CEAFSR_DOFF  0x0000e00000000000UL /* Double Offset             */
+#define  SYSIO_CEAFSR_SIZE  0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
+#define  SYSIO_CEAFSR_MID   0x000003e000000000UL /* UPA MID causing the fault */
+#define  SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved                  */
+/* Correctable-ECC-error interrupt: same latch/clear/log sequence as
+ * sysio_ue_handler(), additionally reporting the ECC syndrome.
+ */
+static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	afsr_reg = reg_base + SYSIO_CE_AFSR;
+	afar_reg = reg_base + SYSIO_CE_AFAR;
+
+	/* Latch error status. */
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
+		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
+	upa_writeq(error_bits, afsr_reg);
+
+	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
+		  "DVMA Read" :
+		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
+		   "DVMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
+	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
+	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+
+	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SYSIO_CEAFSR_SDRD) {
+		reported++;
+		printk("(DVMA Read)");
+	}
+	if (afsr & SYSIO_CEAFSR_SDWR) {
+		reported++;
+		printk("(DVMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+#define SYSIO_SBUS_AFSR		0x2010UL
+#define SYSIO_SBUS_AFAR		0x2018UL
+#define  SYSIO_SBAFSR_PLE   0x8000000000000000UL /* Primary Late PIO Error    */
+#define  SYSIO_SBAFSR_PTO   0x4000000000000000UL /* Primary SBUS Timeout      */
+#define  SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK    */
+#define  SYSIO_SBAFSR_SLE   0x1000000000000000UL /* Secondary Late PIO Error  */
+#define  SYSIO_SBAFSR_STO   0x0800000000000000UL /* Secondary SBUS Timeout    */
+#define  SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK  */
+#define  SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved                  */
+#define  SYSIO_SBAFSR_RD    0x0000800000000000UL /* Primary was late PIO read */
+#define  SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved                  */
+#define  SYSIO_SBAFSR_SIZE  0x00001c0000000000UL /* Size of transfer          */
+#define  SYSIO_SBAFSR_MID   0x000003e000000000UL /* MID causing the error     */
+#define  SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved                  */
+/* SBUS-error interrupt: latch the SBUS AFSR/AFAR, clear the cause
+ * bits, and log a decoded report.  Always returns IRQ_HANDLED.
+ */
+static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long afsr_reg, afar_reg, reg_base;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	reg_base = iommu->sbus_control_reg - 0x2000UL;
+	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
+	afar_reg = reg_base + SYSIO_SBUS_AFAR;
+
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
+		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
+	upa_writeq(error_bits, afsr_reg);
+
+	/* Log the error. */
+	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_SBAFSR_PLE) ?
+		 "Late PIO Error" :
+		 ((error_bits & SYSIO_SBAFSR_PTO) ?
+		  "Time Out" :
+		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
+		   "Error Ack" : "???")))),
+	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
+	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_SBAFSR_SLE) {
+		reported++;
+		printk("(Late PIO Error)");
+	}
+	if (afsr & SYSIO_SBAFSR_STO) {
+		reported++;
+		printk("(Time Out)");
+	}
+	if (afsr & SYSIO_SBAFSR_SBERR) {
+		reported++;
+		printk("(Error Ack)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* XXX check iommu/strbuf for further error status XXX */
+
+	return IRQ_HANDLED;
+}
+
+#define ECC_CONTROL	0x0020UL
+#define  SYSIO_ECNTRL_ECCEN	0x8000000000000000UL /* Enable ECC Checking   */
+#define  SYSIO_ECNTRL_UEEN	0x4000000000000000UL /* Enable UE Interrupts  */
+#define  SYSIO_ECNTRL_CEEN	0x2000000000000000UL /* Enable CE Interrupts  */
+
+#define SYSIO_UE_INO		0x34
+#define SYSIO_CE_INO		0x35
+#define SYSIO_SBUSERR_INO	0x36
+
+/* Register the UE/CE/SBUS-error interrupt handlers for one SYSIO and
+ * enable ECC checking plus the error interrupts.  Any registration
+ * failure at boot is fatal (prom_halt).
+ */
+static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
+{
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned int irq;
+	u64 control;
+
+	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
+	if (request_irq(irq, sysio_ue_handler,
+			SA_SHIRQ, "SYSIO UE", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
+	if (request_irq(irq, sysio_ce_handler,
+			SA_SHIRQ, "SYSIO CE", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
+	if (request_irq(irq, sysio_sbus_error_handler,
+			SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	/* Now turn the error interrupts on and also enable ECC checking. */
+	upa_writeq((SYSIO_ECNTRL_ECCEN |
+		    SYSIO_ECNTRL_UEEN  |
+		    SYSIO_ECNTRL_CEEN),
+		   reg_base + ECC_CONTROL);
+
+	control = upa_readq(iommu->sbus_control_reg);
+	control |= 0x100UL; /* SBUS Error Interrupt Enable */
+	upa_writeq(control, iommu->sbus_control_reg);
+}
+
+/* Boot time initialization. */
+/* Set up the SYSIO IOMMU and streaming buffer for one SBUS: allocate
+ * and link the iommu software state, program the IOMMU control/TSB
+ * registers, scrub the diagnostic state, enable DVMA arbitration, and
+ * register the error interrupt handlers.  All failures are fatal
+ * (prom_halt) since this runs at boot.
+ */
+void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
+{
+	struct linux_prom64_registers rprop;
+	struct sbus_iommu *iommu;
+	unsigned long regs, tsb_base;
+	u64 control;
+	int err, i;
+
+	/* NOTE(review): 'upa-portid' is read from sbus->prom_node while
+	 * 'reg' below uses the prom_node argument — confirm both refer
+	 * to the same PROM node.
+	 */
+	sbus->portid = prom_getintdefault(sbus->prom_node,
+					  "upa-portid", -1);
+
+	err = prom_getproperty(prom_node, "reg",
+			       (char *)&rprop, sizeof(rprop));
+	if (err < 0) {
+		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
+		prom_halt();
+	}
+	regs = rprop.phys_addr;
+
+	/* Over-allocate so the struct can be aligned below; the original
+	 * kmalloc pointer is discarded, so this memory can never be
+	 * kfree'd (acceptable: boot-time, lives forever).
+	 */
+	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
+	if (iommu == NULL) {
+		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+		prom_halt();
+	}
+
+	/* Align on E$ line boundary. */
+	iommu = (struct sbus_iommu *)
+		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
+		 ~(SMP_CACHE_BYTES - 1UL));
+
+	memset(iommu, 0, sizeof(*iommu));
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map = CLUSTER_NPAGES;
+
+	for (i = 0; i < NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+
+	/* Setup spinlock. */
+	spin_lock_init(&iommu->lock);
+
+	/* Init register offsets. */
+	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
+	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+
+	/* The SYSIO SBUS control register is used for dummy reads
+	 * in order to ensure write completion.
+	 */
+	iommu->sbus_control_reg = regs + 0x2000UL;
+
+	/* Link into SYSIO software state. */
+	sbus->iommu = iommu;
+
+	printk("SYSIO: UPA portID %x, at %016lx\n",
+	       sbus->portid, regs);
+
+	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
+	/* NOTE(review): the value read here is discarded and 'control'
+	 * is rebuilt from scratch — presumably the read only serves as
+	 * a register access before reprogramming; confirm.
+	 */
+	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+	control = ((7UL << 16UL)	|
+		   (0UL << 2UL)		|
+		   (1UL << 1UL)		|
+		   (1UL << 0UL));
+
+	/* Using the above configuration we need 1MB iommu page
+	 * table (128K ioptes * 8 bytes per iopte).  This is
+	 * page order 7 on UltraSparc.
+	 */
+	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
+	if (tsb_base == 0UL) {
+		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
+		prom_halt();
+	}
+
+	iommu->page_table = (iopte_t *) tsb_base;
+	memset(iommu->page_table, 0, IO_TSB_SIZE);
+
+	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+
+	/* Clean out any cruft in the IOMMU using
+	 * diagnostic accesses.
+	 */
+	for (i = 0; i < 16; i++) {
+		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
+		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+
+		dram += (unsigned long)i * 8UL;
+		tag += (unsigned long)i * 8UL;
+		upa_writeq(0, dram);
+		upa_writeq(0, tag);
+	}
+	/* Dummy read to ensure the diagnostic writes completed. */
+	upa_readq(iommu->sbus_control_reg);
+
+	/* Give the TSB to SYSIO. */
+	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
+
+	/* Setup streaming buffer, DE=1 SB_EN=1 */
+	control = (1UL << 1UL) | (1UL << 0UL);
+	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+
+	/* Clear out the tags using diagnostics. */
+	for (i = 0; i < 16; i++) {
+		unsigned long ptag, ltag;
+
+		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
+		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+		ptag += (unsigned long)i * 8UL;
+		ltag += (unsigned long)i * 8UL;
+
+		upa_writeq(0UL, ptag);
+		upa_writeq(0UL, ltag);
+	}
+
+	/* Enable DVMA arbitration for all devices/slots. */
+	control = upa_readq(iommu->sbus_control_reg);
+	control |= 0x3fUL;
+	upa_writeq(control, iommu->sbus_control_reg);
+
+	/* Now some Xfire specific grot... */
+	if (this_is_starfire)
+		sbus->starfire_cookie = starfire_hookup(sbus->portid);
+	else
+		sbus->starfire_cookie = NULL;
+
+	sysio_register_error_handlers(sbus);
+}
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
new file mode 100644
index 0000000..63496c4
--- /dev/null
+++ b/arch/sparc64/kernel/semaphore.c
@@ -0,0 +1,251 @@
+/* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $
+ * semaphore.c: Sparc64 semaphore implementation.
+ *
+ * This is basically the PPC semaphore scheme ported to use
+ * the sparc64 atomic instructions, so see the PPC code for
+ * credits.
+ */
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ *	old_count = sem->count;
+ *	tmp = MAX(old_count, 0) + incr;
+ *	sem->count = tmp;
+ *	return old_count;
+ */
+static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_count, tmp;
+
+	/* CAS retry loop: reload on contention; the trailing membar
+	 * orders the successful update before later memory operations.
+	 */
+	__asm__ __volatile__("\n"
+"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
+"1:	ldsw	[%3], %0\n"
+"	mov	%0, %1\n"
+"	cmp	%0, 0\n"
+"	movl	%%icc, 0, %1\n"
+"	add	%1, %4, %1\n"
+"	cas	[%3], %0, %1\n"
+"	cmp	%0, %1\n"
+"	bne,pn	%%icc, 1b\n"
+"	 membar #StoreLoad | #StoreStore\n"
+	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+	: "r" (&sem->count), "r" (incr), "m" (sem->count)
+	: "cc");
+
+	return old_count;
+}
+
+/* Slow path for up(): repair the count (clamping negatives to zero
+ * before the increment) and wake one waiter.
+ */
+static void __up(struct semaphore *sem)
+{
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+
+/* Release the semaphore.  Fast path is a lock-free CAS increment;
+ * the out-of-line slow path (.subsection 2) calls __up() when the
+ * pre-increment count was negative, i.e. sleepers are present.
+ */
+void up(struct semaphore *sem)
+{
+	/* This atomically does:
+	 * 	old_val = sem->count;
+	 *	new_val = sem->count + 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 0)
+	 *		__up(sem);
+	 *
+	 * The (old_val < 0) test is equivalent to
+	 * the more straightforward (new_val <= 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! up sem(%0)\n"
+"	membar	#StoreLoad | #LoadLoad\n"
+"1:	lduw	[%0], %%g1\n"
+"	add	%%g1, 1, %%g7\n"
+"	cas	[%0], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 addcc	%%g7, 1, %%g0\n"
+"	ble,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%0, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%1\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: : "r" (sem), "i" (__up)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+}
+
+/* Slow path for down(): sleep uninterruptibly on the wait queue until
+ * __sem_update_count() acquires a positive count, then wake the next
+ * waiter in case more wakeups are pending.
+ */
+static void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		/* Re-arm before retrying; schedule() left us RUNNING. */
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	wake_up(&sem->wait);
+}
+
+/* Acquire the semaphore, sleeping uninterruptibly if needed.  Fast
+ * path is a lock-free CAS decrement; the out-of-line slow path
+ * (.subsection 2) calls __down() when the pre-decrement count was
+ * below 1, i.e. the semaphore was unavailable.
+ */
+void __sched down(struct semaphore *sem)
+{
+	might_sleep();
+	/* This atomically does:
+	 * 	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 1)
+	 *		__down(sem);
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down sem(%0)\n"
+"1:	lduw	[%0], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cas	[%0], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 cmp	%%g7, 1\n"
+"	bl,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%0, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%1\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: : "r" (sem), "i" (__down)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+}
+
+/* Try to acquire the semaphore without sleeping.  Returns 0 on
+ * success, 1 if the semaphore was unavailable.  The CAS is only
+ * attempted when the observed count permits the decrement.
+ */
+int down_trylock(struct semaphore *sem)
+{
+	int ret;
+
+	/* This atomically does:
+	 * 	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	if (old_val < 1) {
+	 *		ret = 1;
+	 *	} else {
+	 *		sem->count = new_val;
+	 *		ret = 0;
+	 *	}
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down_trylock sem(%1) ret(%0)\n"
+"1:	lduw	[%1], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cmp	%%g1, 1\n"
+"	bl,pn	%%icc, 2f\n"
+"	 mov	1, %0\n"
+"	cas	[%1], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 mov	0, %0\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+	: "=&r" (ret)
+	: "r" (sem)
+	: "g1", "g7", "memory", "cc");
+
+	return ret;
+}
+
+/* Slow path for down_interruptible(): sleep interruptibly until the
+ * count is acquired, or bail out with -EINTR on a pending signal
+ * (undoing the failed decrement via __sem_update_count(sem, 0)).
+ */
+static int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_INTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/* incr==0 clamps a negative count back to zero. */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		tsk->state = TASK_INTERRUPTIBLE;
+	}
+	tsk->state = TASK_RUNNING;
+	remove_wait_queue(&sem->wait, &wait);
+	wake_up(&sem->wait);
+	return retval;
+}
+
+/* Acquire the semaphore, sleeping interruptibly if needed.  Returns 0
+ * on success or -EINTR (from the out-of-line __down_interruptible()
+ * slow path) if interrupted by a signal.
+ */
+int __sched down_interruptible(struct semaphore *sem)
+{
+	int ret = 0;
+	
+	might_sleep();
+	/* This atomically does:
+	 * 	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 1)
+	 *		ret = __down_interruptible(sem);
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down_interruptible sem(%2) ret(%0)\n"
+"1:	lduw	[%2], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cas	[%2], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 cmp	%%g7, 1\n"
+"	bl,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%2, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%3\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: "=r" (ret)
+	: "0" (ret), "r" (sem), "i" (__down_interruptible)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+	return ret;
+}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
new file mode 100644
index 0000000..12c3d84b
--- /dev/null
+++ b/arch/sparc64/kernel/setup.c
@@ -0,0 +1,731 @@
+/*  $Id: setup.c,v 1.72 2002/02/09 19:49:30 davem Exp $
+ *  linux/arch/sparc64/kernel/setup.c
+ *
+ *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <asm/smp.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/idprom.h>
+#include <asm/head.h>
+#include <asm/starfire.h>
+#include <asm/mmu_context.h>
+#include <asm/timer.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_IP_PNP
+#include <net/ipconfig.h>
+#endif
+
+/* Dummied-up text-console description handed to the VT layer;
+ * the values describe a 128x54 character display.
+ */
+struct screen_info screen_info = {
+	0, 0,			/* orig-x, orig-y */
+	0,			/* unused */
+	0,			/* orig-video-page */
+	0,			/* orig-video-mode */
+	128,			/* orig-video-cols */
+	0, 0, 0,		/* unused, ega_bx, unused */
+	54,			/* orig-video-lines */
+	0,                      /* orig-video-isVGA */
+	16                      /* orig-video-points */
+};
+
+/* Typing sync at the prom prompt calls the function pointed to by
+ * the sync callback which I set to the following function.
+ * This should sync all filesystems and return, for now it just
+ * prints out pretty messages and returns.
+ */
+
+/* Optional callbacks assigned elsewhere in the kernel; NULL (the
+ * zero-initialized default) until some driver installs them.
+ */
+void (*prom_palette)(int);
+void (*prom_keyboard)(void);
+
+/* Console write hook that forwards the buffer straight to the PROM. */
+static void
+prom_console_write(struct console *con, const char *s, unsigned n)
+{
+	prom_write(s, n);
+}
+
+/* Console temporarily registered while control is inside the PROM
+ * (see prom_callback()), so printk output remains visible.
+ */
+static struct console prom_console = {
+	.name =		"prom",
+	.write =	prom_console_write,
+	.flags =	CON_CONSDEV | CON_ENABLED,
+	.index =	-1,
+};
+
+#define PROM_TRUE	-1
+#define PROM_FALSE	0
+
+/* Pretty sick eh? */
+/* Callback invoked from OBP for the Forth words installed by
+ * register_prom_callbacks().  args[0] names the command; results are
+ * written back starting at args[args[1] + 3], with args[2] set to the
+ * number of result cells.  While the callback runs, every Linux
+ * console is swapped out for the PROM console.
+ */
+int prom_callback(long *args)
+{
+	struct console *cons, *saved_console = NULL;
+	unsigned long flags;
+	char *cmd;
+	extern spinlock_t prom_entry_lock;
+
+	if (!args)
+		return -1;
+	if (!(cmd = (char *)args[0]))
+		return -1;
+
+	/*
+	 * The callback can be invoked on the cpu that first dropped 
+	 * into prom_cmdline after taking the serial interrupt, or on 
+	 * a slave processor that was smp_captured() if the 
+	 * administrator has done a switch-cpu inside obp. In either 
+	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
+	 */
+	irq_exit();
+
+	/* XXX Revisit the locking here someday.  This is a debugging
+	 * XXX feature so it isn't all that critical.  -DaveM
+	 */
+	local_irq_save(flags);
+
+	spin_unlock(&prom_entry_lock);
+	/* Unregister every console, saving them on a list so they can
+	 * be restored on exit, then switch to the PROM console.
+	 */
+	cons = console_drivers;
+	while (cons) {
+		unregister_console(cons);
+		cons->flags &= ~(CON_PRINTBUFFER);
+		cons->next = saved_console;
+		saved_console = cons;
+		cons = console_drivers;
+	}
+	register_console(&prom_console);
+	if (!strcmp(cmd, "sync")) {
+		prom_printf("PROM `%s' command...\n", cmd);
+		show_free_areas();
+		if (current->pid != 0) {
+			local_irq_enable();
+			sys_sync();
+			local_irq_disable();
+		}
+		args[2] = 0;
+		args[args[1] + 3] = -1;
+		prom_printf("Returning to PROM\n");
+	} else if (!strcmp(cmd, "va>tte-data")) {
+		/* Translate a (ctx, va) pair into TLB entry (tte) data
+		 * for OBP's va>tte-data word.
+		 */
+		unsigned long ctx, va;
+		unsigned long tte = 0;
+		long res = PROM_FALSE;
+
+		ctx = args[3];
+		va = args[4];
+		if (ctx) {
+			/*
+			 * Find process owning ctx, lookup mapping.
+			 */
+			struct task_struct *p;
+			struct mm_struct *mm = NULL;
+			pgd_t *pgdp;
+			pud_t *pudp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+
+			for_each_process(p) {
+				mm = p->mm;
+				if (CTX_NRBITS(mm->context) == ctx)
+					break;
+			}
+			if (!mm ||
+			    CTX_NRBITS(mm->context) != ctx)
+				goto done;
+
+			pgdp = pgd_offset(mm, va);
+			if (pgd_none(*pgdp))
+				goto done;
+			pudp = pud_offset(pgdp, va);
+			if (pud_none(*pudp))
+				goto done;
+			pmdp = pmd_offset(pudp, va);
+			if (pmd_none(*pmdp))
+				goto done;
+
+			/* Preemption implicitly disabled by virtue of
+			 * being called from inside OBP.
+			 */
+			ptep = pte_offset_map(pmdp, va);
+			if (pte_present(*ptep)) {
+				tte = pte_val(*ptep);
+				res = PROM_TRUE;
+			}
+			pte_unmap(ptep);
+			goto done;
+		}
+
+		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
+			/* Kernel text/data: read the tte out of the
+			 * locked-down TLB entry.
+			 */
+			unsigned long kernel_pctx = 0;
+
+			if (tlb_type == cheetah_plus)
+				kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
+						CTX_CHEETAH_PLUS_CTX0);
+
+			/* Spitfire Errata #32 workaround */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (kernel_pctx),
+					       "r" (PRIMARY_CONTEXT),
+					       "i" (ASI_DMMU));
+
+			/*
+			 * Locked down tlb entry.
+			 */
+
+			if (tlb_type == spitfire)
+				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
+			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
+
+			res = PROM_TRUE;
+			goto done;
+		}
+
+		if (va < PGDIR_SIZE) {
+			/*
+			 * vmalloc or prom_inherited mapping.
+			 */
+			pgd_t *pgdp;
+			pud_t *pudp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+			int error;
+
+			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
+				tte = prom_virt_to_phys(va, &error);
+				if (!error)
+					res = PROM_TRUE;
+				goto done;
+			}
+			pgdp = pgd_offset_k(va);
+			if (pgd_none(*pgdp))
+				goto done;
+			pudp = pud_offset(pgdp, va);
+			if (pud_none(*pudp))
+				goto done;
+			pmdp = pmd_offset(pudp, va);
+			if (pmd_none(*pmdp))
+				goto done;
+
+			/* Preemption implicitly disabled by virtue of
+			 * being called from inside OBP.
+			 */
+			ptep = pte_offset_kernel(pmdp, va);
+			if (pte_present(*ptep)) {
+				tte = pte_val(*ptep);
+				res = PROM_TRUE;
+			}
+			goto done;
+		}
+
+		if (va < PAGE_OFFSET) {
+			/*
+			 * No mappings here.
+			 */
+			goto done;
+		}
+
+		if (va & (1UL << 40)) {
+			/*
+			 * I/O page.
+			 */
+
+			tte = (__pa(va) & _PAGE_PADDR) |
+			      _PAGE_VALID | _PAGE_SZ4MB |
+			      _PAGE_E | _PAGE_P | _PAGE_W;
+			res = PROM_TRUE;
+			goto done;
+		}
+
+		/*
+		 * Normal page.
+		 */
+		tte = (__pa(va) & _PAGE_PADDR) |
+		      _PAGE_VALID | _PAGE_SZ4MB |
+		      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
+		res = PROM_TRUE;
+
+	done:
+		if (res == PROM_TRUE) {
+			args[2] = 3;
+			args[args[1] + 3] = 0;
+			args[args[1] + 4] = res;
+			args[args[1] + 5] = tte;
+		} else {
+			args[2] = 2;
+			args[args[1] + 3] = 0;
+			args[args[1] + 4] = res;
+		}
+	} else if (!strcmp(cmd, ".soft1")) {
+		/* Pretty-print the software bits of a tte.
+		 * NOTE(review): the "W"/"R" labels look swapped relative
+		 * to _PAGE_READ/_PAGE_WRITE — confirm against the pte
+		 * bit definitions.
+		 */
+		unsigned long tte;
+
+		tte = args[3];
+		prom_printf("%lx:\"%s%s%s%s%s\" ",
+			    (tte & _PAGE_SOFT) >> 7,
+			    tte & _PAGE_MODIFIED ? "M" : "-",
+			    tte & _PAGE_ACCESSED ? "A" : "-",
+			    tte & _PAGE_READ     ? "W" : "-",
+			    tte & _PAGE_WRITE    ? "R" : "-",
+			    tte & _PAGE_PRESENT  ? "P" : "-");
+
+		args[2] = 2;
+		args[args[1] + 3] = 0;
+		args[args[1] + 4] = PROM_TRUE;
+	} else if (!strcmp(cmd, ".soft2")) {
+		unsigned long tte;
+
+		tte = args[3];
+		prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
+
+		args[2] = 2;
+		args[args[1] + 3] = 0;
+		args[args[1] + 4] = PROM_TRUE;
+	} else {
+		prom_printf("unknown PROM `%s' command...\n", cmd);
+	}
+	/* Restore the consoles we unregistered above. */
+	unregister_console(&prom_console);
+	while (saved_console) {
+		cons = saved_console;
+		saved_console = cons->next;
+		register_console(cons);
+	}
+	spin_lock(&prom_entry_lock);
+	local_irq_restore(flags);
+
+	/*
+	 * Restore in-interrupt status for a resume from obp.
+	 */
+	irq_enter();
+	return 0;
+}
+
+/* Flags accumulated from "-..." boot switches (see process_switch()). */
+unsigned int boot_flags = 0;
+#define BOOTME_DEBUG  0x1
+#define BOOTME_SINGLE 0x2
+
+/* Exported for mm/init.c:paging_init. */
+unsigned long cmdline_memory_size = 0;
+
+/* Early debug console on the PROM, registered by the -p boot switch. */
+static struct console prom_debug_console = {
+	.name =		"debug",
+	.write =	prom_console_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+/* XXX Implement this at some point... */
+/* Stub for entering a kernel debugger; currently a no-op. */
+void kernel_enter_debugger(void)
+{
+}
+
+/* Handle a "system interrupted" event from OBP.  Only honored when
+ * the kernel was booted with the debug switch (-d); returns non-zero
+ * when the event was consumed, zero to ignore it.
+ */
+int obp_system_intr(void)
+{
+	if (!(boot_flags & BOOTME_DEBUG))
+		return 0;
+
+	printk("OBP: system interrupted\n");
+	prom_halt();
+	return 1;
+}
+
+/* 
+ * Process kernel command line switches that are specific to the
+ * SPARC or that require special low-level processing.
+ */
+/* Decode one single-character "-X" boot switch. */
+static void __init process_switch(char c)
+{
+	if (c == 'd') {
+		/* Debug boot. */
+		boot_flags |= BOOTME_DEBUG;
+	} else if (c == 's') {
+		/* Single-user boot. */
+		boot_flags |= BOOTME_SINGLE;
+	} else if (c == 'h') {
+		prom_printf("boot_flags_init: Halt!\n");
+		prom_halt();
+	} else if (c == 'p') {
+		/* Use PROM debug console. */
+		register_console(&prom_debug_console);
+	} else {
+		printk("Unknown boot switch (-%c)\n", c);
+	}
+}
+
+/* Parse a "console=..." boot argument.  serial_console becomes
+ * 0 for a framebuffer console, 1 for ttya/ttyS0, 2 for ttyb/ttyS1
+ * (for "ttySN" the stored value is N+1).  A "console=prom" argument
+ * is blanked out of the command line with spaces and switches
+ * conswitchp to the PROM console.
+ */
+static void __init process_console(char *commands)
+{
+	serial_console = 0;
+	commands += 8;
+	/* Linux-style serial */
+	if (!strncmp(commands, "ttyS", 4))
+		serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
+	else if (!strncmp(commands, "tty", 3)) {
+		char c = *(commands + 3);
+		/* Solaris-style serial */
+		if (c == 'a' || c == 'b') {
+			serial_console = c - 'a' + 1;
+			prom_printf ("Using /dev/tty%c as console.\n", c);
+		}
+		/* else Linux-style fbcon, not serial */
+	}
+#if defined(CONFIG_PROM_CONSOLE)
+	if (!strncmp(commands, "prom", 4)) {
+		char *p;
+
+		/* Overwrite "console=prom..." (back at commands - 8)
+		 * with spaces so later parsing does not see it.
+		 */
+		for (p = commands - 8; *p && *p != ' '; p++)
+			*p = ' ';
+		conswitchp = &prom_con;
+	}
+#endif
+}
+
+/* Walk the boot command line: apply "-X" switches via
+ * process_switch(), handle "console=" and "mem=" arguments, and
+ * skip over everything else.
+ */
+static void __init boot_flags_init(char *commands)
+{
+	while (*commands) {
+		/* Move to the start of the next "argument". */
+		while (*commands && *commands == ' ')
+			commands++;
+
+		/* Process any command switches, otherwise skip it. */
+		if (*commands == '\0')
+			break;
+		if (*commands == '-') {
+			commands++;
+			while (*commands && *commands != ' ')
+				process_switch(*commands++);
+			continue;
+		}
+		if (!strncmp(commands, "console=", 8)) {
+			process_console(commands);
+		} else if (!strncmp(commands, "mem=", 4)) {
+			/*
+			 * "mem=XXX[kKmM]" overrides the PROM-reported
+			 * memory size.
+			 */
+			cmdline_memory_size = simple_strtoul(commands + 4,
+							     &commands, 0);
+			if (*commands == 'K' || *commands == 'k') {
+				cmdline_memory_size <<= 10;
+				commands++;
+			} else if (*commands=='M' || *commands=='m') {
+				cmdline_memory_size <<= 20;
+				commands++;
+			}
+		}
+		/* Advance past the remainder of this argument. */
+		while (*commands && *commands != ' ')
+			commands++;
+	}
+}
+
+/* Symbols provided elsewhere in the sparc64 tree. */
+extern int prom_probe_memory(void);
+extern unsigned long start, end;
+extern void panic_setup(char *, int *);
+
+/* Root/ramdisk boot words defined elsewhere in the image. */
+extern unsigned short root_flags;
+extern unsigned short root_dev;
+extern unsigned short ram_flags;
+#define RAMDISK_IMAGE_START_MASK	0x07FF
+#define RAMDISK_PROMPT_FLAG		0x8000
+#define RAMDISK_LOAD_FLAG		0x4000
+
+extern int root_mountflags;
+
+/* Command line to hand back on reboot. */
+char reboot_command[COMMAND_LINE_SIZE];
+
+/* Zeroed pt_regs used as init_task's kregs (see setup_arch()). */
+static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+
+/* Install prom_callback() as OBP's $callback and redefine the Forth
+ * words va>tte-data, .soft1 and .soft2 to route through it.
+ */
+void register_prom_callbacks(void)
+{
+	prom_setcallback(prom_callback);
+	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
+		   "' linux-va>tte-data to va>tte-data");
+	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
+		   "' linux-.soft1 to .soft1");
+	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
+		   "' linux-.soft2 to .soft2");
+}
+
+extern void paging_init(void);
+
+/* Early architecture setup: save boot args, pick a console, probe
+ * PROM memory to find phys_base, read the locked kernel TLB entry,
+ * set up ramdisk/root parameters and finally start paging.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+	unsigned long highest_paddr;
+	int i;
+
+	/* Initialize PROM console and command line. */
+	*cmdline_p = prom_getbootargs();
+	strcpy(saved_command_line, *cmdline_p);
+
+	printk("ARCH: SUN4U\n");
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#elif defined(CONFIG_PROM_CONSOLE)
+	conswitchp = &prom_con;
+#endif
+
+#ifdef CONFIG_SMP
+	/* Patch the irq_stat indexing shift to match the actual
+	 * spacing of irq_stat[] elements (one or two cache lines).
+	 */
+	i = (unsigned long)&irq_stat[1] - (unsigned long)&irq_stat[0];
+	if ((i == SMP_CACHE_BYTES) || (i == (2 * SMP_CACHE_BYTES))) {
+		extern unsigned int irqsz_patchme[1];
+		irqsz_patchme[0] |= ((i == SMP_CACHE_BYTES) ? SMP_CACHE_BYTES_SHIFT : \
+							SMP_CACHE_BYTES_SHIFT + 1);
+		flushi((long)&irqsz_patchme[0]);
+	} else {
+		prom_printf("Unexpected size of irq_stat[] elements\n");
+		prom_halt();
+	}
+#endif
+	/* Work out if we are starfire early on */
+	check_if_starfire();
+
+	boot_flags_init(*cmdline_p);
+
+	idprom_init();
+	(void) prom_probe_memory();
+
+	/* In paging_init() we tip off this value to see if we need
+	 * to change init_mm.pgd to point to the real alias mapping.
+	 */
+	phys_base = 0xffffffffffffffffUL;
+	highest_paddr = 0UL;
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		unsigned long top;
+
+		if (sp_banks[i].base_addr < phys_base)
+			phys_base = sp_banks[i].base_addr;
+		top = sp_banks[i].base_addr +
+			sp_banks[i].num_bytes;
+		if (highest_paddr < top)
+			highest_paddr = top;
+	}
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	/* The kernel's physical load address comes from the locked
+	 * ITLB entry; the data format differs per cpu type.
+	 */
+	switch (tlb_type) {
+	default:
+	case spitfire:
+		kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
+		kern_base &= _PAGE_PADDR_SF;
+		break;
+
+	case cheetah:
+	case cheetah_plus:
+		kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
+		kern_base &= _PAGE_PADDR;
+		break;
+	};
+
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
+	if (!root_flags)
+		root_mountflags &= ~MS_RDONLY;
+	ROOT_DEV = old_decode_dev(root_dev);
+#ifdef CONFIG_BLK_DEV_INITRD
+	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
+	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
+	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);	
+#endif
+
+	init_task.thread_info->kregs = &fake_swapper_regs;
+
+#ifdef CONFIG_IP_PNP
+	/* Seed IP autoconfig from the PROM's /chosen properties when
+	 * the user has not configured it manually.
+	 */
+	if (!ic_set_manually) {
+		int chosen = prom_finddevice ("/chosen");
+		u32 cl, sv, gw;
+		
+		cl = prom_getintdefault (chosen, "client-ip", 0);
+		sv = prom_getintdefault (chosen, "server-ip", 0);
+		gw = prom_getintdefault (chosen, "gateway-ip", 0);
+		if (cl && sv) {
+			ic_myaddr = cl;
+			ic_servaddr = sv;
+			if (gw)
+				ic_gateway = gw;
+#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
+			ic_proto_enabled = 0;
+#endif
+		}
+	}
+#endif
+
+	paging_init();
+}
+
+/* Pick a default console from the PROM's input/output device pair,
+ * unless the user already forced one (serial_console >= 0).
+ * Inconsistent device pairs halt in the PROM.
+ */
+static int __init set_preferred_console(void)
+{
+	int idev, odev;
+
+	/* The user has requested a console so this is already set up. */
+	if (serial_console >= 0)
+		return -EBUSY;
+
+	idev = prom_query_input_device();
+	odev = prom_query_output_device();
+	if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
+		serial_console = 0;
+	} else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
+		serial_console = 1;
+	} else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
+		serial_console = 2;
+	} else {
+		prom_printf("Inconsistent console: "
+			    "input %d, output %d\n",
+			    idev, odev);
+		prom_halt();
+	}
+
+	if (serial_console)
+		return add_preferred_console("ttyS", serial_console - 1, NULL);
+
+	return -ENODEV;
+}
+console_initcall(set_preferred_console);
+
+/* BUFFER is PAGE_SIZE bytes long. */
+
+extern char *sparc_cpu_type;
+extern char *sparc_fpu_type;
+
+extern void smp_info(struct seq_file *);
+extern void smp_bogo(struct seq_file *);
+extern void mmu_info(struct seq_file *);
+
+/* Emit the single /proc/cpuinfo record; SMP builds append per-cpu
+ * bogomips and cpu info via smp_bogo()/smp_info().
+ */
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+	seq_printf(m, 
+		   "cpu\t\t: %s\n"
+		   "fpu\t\t: %s\n"
+		   "promlib\t\t: Version 3 Revision %d\n"
+		   "prom\t\t: %d.%d.%d\n"
+		   "type\t\t: sun4u\n"
+		   "ncpus probed\t: %ld\n"
+		   "ncpus active\t: %ld\n"
+#ifndef CONFIG_SMP
+		   "Cpu0Bogo\t: %lu.%02lu\n"
+		   "Cpu0ClkTck\t: %016lx\n"
+#endif
+		   ,
+		   sparc_cpu_type,
+		   sparc_fpu_type,
+		   prom_rev,
+		   prom_prev >> 16,
+		   (prom_prev >> 8) & 0xff,
+		   prom_prev & 0xff,
+		   (long)num_possible_cpus(),
+		   (long)num_online_cpus()
+#ifndef CONFIG_SMP
+		   , cpu_data(0).udelay_val/(500000/HZ),
+		   (cpu_data(0).udelay_val/(5000/HZ)) % 100,
+		   cpu_data(0).clock_tick
+#endif
+		);
+#ifdef CONFIG_SMP
+	smp_bogo(m);
+#endif
+	mmu_info(m);
+#ifdef CONFIG_SMP
+	smp_info(m);
+#endif
+	return 0;
+}
+
+/* seq_file start: there is exactly one pseudo-record, so hand back
+ * any pointer that is non-NULL and not IS_ERR() to signal success.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos != 0)
+		return NULL;
+	return &c_start;
+}
+
+/* seq_file next: advance the position and let c_start() decide
+ * whether any record remains.
+ */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return c_start(m, pos);
+}
+
+/* seq_file stop: nothing to clean up. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file operations backing the cpuinfo interface. */
+struct seq_operations cpuinfo_op = {
+	.start =c_start,
+	.next =	c_next,
+	.stop =	c_stop,
+	.show =	show_cpuinfo,
+};
+
+extern int stop_a_enabled;
+
+/* Drop into the PROM command line on a Stop-A / break, unless
+ * Stop-A handling has been disabled (stop_a_enabled == 0).
+ */
+void sun_do_break(void)
+{
+	if (!stop_a_enabled)
+		return;
+
+	prom_printf("\n");
+	flush_user_windows();
+
+	prom_cmdline();
+}
+
+/* -1 = unset (set_preferred_console() decides), 0 = screen console,
+ * N > 0 = serial line N-1 (see process_console()).
+ */
+int serial_console = -1;
+int stop_a_enabled = 1;
+
+/* Register a 'struct cpu' object with the sysfs topology code for
+ * every possible cpu.  Succeeds (returns 0) if at least one cpu was
+ * registered, -ENOMEM otherwise.
+ */
+static int __init topology_init(void)
+{
+	int cpu, ret = -ENOMEM;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		struct cpu *c;
+
+		if (!cpu_possible(cpu))
+			continue;
+
+		c = kmalloc(sizeof(*c), GFP_KERNEL);
+		if (!c)
+			continue;
+
+		memset(c, 0, sizeof(*c));
+		register_cpu(c, cpu, NULL);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
new file mode 100644
index 0000000..b279346
--- /dev/null
+++ b/arch/sparc64/kernel/signal.c
@@ -0,0 +1,688 @@
+/*  $Id: signal.c,v 1.60 2002/02/09 19:49:31 davem Exp $
+ *  arch/sparc64/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *  Copyright (C) 1997 Eddie C. Dost   (ecd@skynet.be)
+ *  Copyright (C) 1997,1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#ifdef CONFIG_SPARC32_COMPAT
+#include <linux/compat.h>	/* for compat_old_sigset_t */
+#endif
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/smp_lock.h>
+#include <linux/binfmts.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
+#include <asm/pgtable.h>
+#include <asm/fpumacro.h>
+#include <asm/uctx.h>
+#include <asm/siginfo.h>
+#include <asm/visasm.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+static int do_signal(sigset_t *oldset, struct pt_regs * regs,
+		     unsigned long orig_o0, int ret_from_syscall);
+
+/* {set, get}context() needed for 64-bit SparcLinux userland. */
+/* setcontext() for 64-bit userland: load pc/npc, %y, the globals and
+ * outs, condition codes/%asi, the saved frame pointer and return
+ * address, optional FPU state, and (when %i1 is non-zero) the signal
+ * mask from the ucontext at %i0.  Any fault, misalignment or saved
+ * user windows force SIGSEGV instead.
+ */
+asmlinkage void sparc64_set_context(struct pt_regs *regs)
+{
+	struct ucontext __user *ucp = (struct ucontext __user *)
+		regs->u_regs[UREG_I0];
+	mc_gregset_t __user *grp;
+	unsigned long pc, npc, tstate;
+	unsigned long fp, i7;
+	unsigned char fenab;
+	int err;
+
+	flush_user_windows();
+	if (get_thread_wsaved()					||
+	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
+	    (!__access_ok(ucp, sizeof(*ucp))))
+		goto do_sigsegv;
+	grp  = &ucp->uc_mcontext.mc_gregs;
+	err  = __get_user(pc, &((*grp)[MC_PC]));
+	err |= __get_user(npc, &((*grp)[MC_NPC]));
+	if (err || ((pc | npc) & 3))
+		goto do_sigsegv;
+	if (regs->u_regs[UREG_I1]) {
+		/* %i1 non-zero: caller also wants the signal mask set. */
+		sigset_t set;
+
+		if (_NSIG_WORDS == 1) {
+			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
+				goto do_sigsegv;
+		} else {
+			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
+				goto do_sigsegv;
+		}
+		sigdelsetmask(&set, ~_BLOCKABLE);
+		spin_lock_irq(&current->sighand->siglock);
+		current->blocked = set;
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+	err |= __get_user(regs->y, &((*grp)[MC_Y]));
+	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
+	/* Only condition codes and %asi may be changed via %tstate. */
+	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
+	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
+	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
+	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
+	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
+	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
+	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
+	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
+	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
+	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
+	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
+	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
+	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
+	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
+	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
+	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
+	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
+
+	/* Write the saved fp/i7 into the user's register window. */
+	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
+	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
+	err |= __put_user(fp,
+	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+	err |= __put_user(i7,
+	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+
+	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
+	if (fenab) {
+		unsigned long *fpregs = current_thread_info()->fpregs;
+		unsigned long fprs;
+		
+		fprs_write(0);
+		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
+		if (fprs & FPRS_DL)
+			err |= copy_from_user(fpregs,
+					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
+					      (sizeof(unsigned int) * 32));
+		if (fprs & FPRS_DU)
+			err |= copy_from_user(fpregs+16,
+			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
+			 (sizeof(unsigned int) * 32));
+		err |= __get_user(current_thread_info()->xfsr[0],
+				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
+		err |= __get_user(current_thread_info()->gsr[0],
+				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
+		regs->tstate &= ~TSTATE_PEF;
+	}
+	if (err)
+		goto do_sigsegv;
+
+	return;
+do_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+/* getcontext() for 64-bit userland: store current registers, the
+ * signal mask and (currently never — see the #if 1 below) FPU state
+ * into the ucontext at %i0, then step past the trap instruction.
+ * Faults or saved user windows force SIGSEGV.
+ */
+asmlinkage void sparc64_get_context(struct pt_regs *regs)
+{
+	struct ucontext __user *ucp = (struct ucontext __user *)
+		regs->u_regs[UREG_I0];
+	mc_gregset_t __user *grp;
+	mcontext_t __user *mcp;
+	unsigned long fp, i7;
+	unsigned char fenab;
+	int err;
+
+	synchronize_user_stack();
+	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
+		goto do_sigsegv;
+
+#if 1
+	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
+#else
+	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
+#endif
+		
+	mcp = &ucp->uc_mcontext;
+	grp = &mcp->mc_gregs;
+
+	/* Skip over the trap instruction, first. */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc   = (regs->tnpc & 0xffffffff);
+		regs->tnpc  = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc   = regs->tnpc;
+		regs->tnpc += 4;
+	}
+	err = 0;
+	if (_NSIG_WORDS == 1)
+		err |= __put_user(current->blocked.sig[0],
+				  (unsigned long __user *)&ucp->uc_sigmask);
+	else
+		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
+				      sizeof(sigset_t));
+
+	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
+	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
+	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
+	err |= __put_user(regs->y, &((*grp)[MC_Y]));
+	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
+	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
+	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
+	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
+	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
+	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
+	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
+	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
+	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
+	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
+	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
+	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
+	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
+	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
+	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
+
+	/* Pull fp/i7 out of the user's register window on the stack. */
+	err |= __get_user(fp,
+		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+	err |= __get_user(i7,
+		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+	err |= __put_user(fp, &(mcp->mc_fp));
+	err |= __put_user(i7, &(mcp->mc_i7));
+
+	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
+	if (fenab) {
+		unsigned long *fpregs = current_thread_info()->fpregs;
+		unsigned long fprs;
+		
+		fprs = current_thread_info()->fpsaved[0];
+		if (fprs & FPRS_DL)
+			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
+					    (sizeof(unsigned int) * 32));
+		if (fprs & FPRS_DU)
+			err |= copy_to_user(
+                          ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
+			  (sizeof(unsigned int) * 32));
+		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
+		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
+		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
+	}
+	if (err)
+		goto do_sigsegv;
+
+	return;
+do_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Layout of the 64-bit rt signal frame pushed on the user stack. */
+struct rt_signal_frame {
+	struct sparc_stackf	ss;
+	siginfo_t		info;
+	struct pt_regs		regs;
+	__siginfo_fpu_t __user	*fpu_save;	/* 0 when no FPU state saved */
+	stack_t			stack;
+	sigset_t		mask;
+	__siginfo_fpu_t		fpu_state;
+};
+
+/* Align macros */
+/* Frame size rounded up to an 8-byte boundary. */
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
+ */
+/* Common body of sigpause()/sigsuspend(): install SET as the blocked
+ * mask (32-bit tasks are forwarded to _sigpause32_common), step past
+ * the trap instruction, then sleep until do_signal() delivers
+ * something, arranging an -EINTR return with the carry bits set.
+ */
+asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+{
+	sigset_t saveset;
+
+#ifdef CONFIG_SPARC32_COMPAT
+	if (test_thread_flag(TIF_32BIT)) {
+		extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
+							  struct pt_regs *);
+		_sigpause32_common(set, regs);
+		return;
+	}
+#endif
+	set &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, set);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc = (regs->tnpc & 0xffffffff);
+		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal(&saveset, regs, 0, 0))
+			return;
+	}
+}
+
+/* sigpause(2) entry: the mask arrives as an explicit argument. */
+asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
+{
+	_sigpause_common(set, regs);
+}
+
+/* sigsuspend(2) entry: the mask is taken from the first syscall
+ * argument register (%i0).
+ */
+asmlinkage void do_sigsuspend(struct pt_regs *regs)
+{
+	_sigpause_common(regs->u_regs[UREG_I0], regs);
+}
+
+/* rt_sigsuspend(2): copy the full sigset from userspace, install it
+ * as the blocked mask, step past the trap instruction and sleep until
+ * do_signal() delivers something; errors are reported by setting the
+ * carry bits and storing the positive errno in %i0.
+ */
+asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
+{
+	sigset_t oldset, set;
+        
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t)) {
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINVAL;
+		return;
+	}
+	if (copy_from_user(&set, uset, sizeof(set))) {
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EFAULT;
+		return;
+	}
+                                                                
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc = (regs->tnpc & 0xffffffff);
+		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal(&oldset, regs, 0, 0))
+			return;
+	}
+}
+
+/* Reload FPU registers, %fsr and %gsr from the user signal frame,
+ * clearing TSTATE_PEF in the saved tstate and recording the restored
+ * halves in fpsaved[0].  Returns non-zero on any user-copy fault.
+ */
+static inline int
+restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err;
+
+	err = __get_user(fprs, &fpu->si_fprs);
+	fprs_write(0);
+	regs->tstate &= ~TSTATE_PEF;
+	if (fprs & FPRS_DL)
+		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
+		       	       (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
+		       	       (sizeof(unsigned int) * 32));
+	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	current_thread_info()->fpsaved[0] |= fprs;
+	return err;
+}
+
+/* rt_sigreturn: unwind the rt signal frame sitting at %fp+STACK_BIAS
+ * and restore registers, optional FPU state, signal mask and the
+ * alternate signal stack.  A bogus or misaligned frame raises
+ * SIGSEGV.
+ */
+void do_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_signal_frame __user *sf;
+	unsigned long tpc, tnpc, tstate;
+	__siginfo_fpu_t __user *fpu_save;
+	mm_segment_t old_fs;
+	sigset_t set;
+	stack_t st;
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack ();
+	sf = (struct rt_signal_frame __user *)
+		(regs->u_regs [UREG_FP] + STACK_BIAS);
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (((unsigned long) sf) & 3)
+		goto segv;
+
+	err = get_user(tpc, &sf->regs.tpc);
+	err |= __get_user(tnpc, &sf->regs.tnpc);
+	if (test_thread_flag(TIF_32BIT)) {
+		tpc &= 0xffffffff;
+		tnpc &= 0xffffffff;
+	}
+	/* Reject misaligned pc/npc values. */
+	err |= ((tpc | tnpc) & 3);
+
+	/* 2. Restore the state */
+	err |= __get_user(regs->y, &sf->regs.y);
+	err |= __get_user(tstate, &sf->regs.tstate);
+	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));
+
+	/* User can only change condition codes and %asi in %tstate. */
+	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
+	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	if (fpu_save)
+		err |= restore_fpu_state(regs, &sf->fpu_state);
+
+	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
+	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
+	
+	if (err)
+		goto segv;
+		
+	regs->tpc = tpc;
+	regs->tnpc = tnpc;
+	
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
+	set_fs(old_fs);
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Return non-zero when FP is unusable as a signal frame pointer.
+ * Only 8-byte alignment is checked; fplen is currently unused.
+ */
+static int invalid_frame_pointer(void __user *fp, int fplen)
+{
+	return (((unsigned long) fp) & 7) != 0;
+}
+
+/* Dump the FPU registers (taken from just past pt_regs, i.e. regs+1),
+ * %fsr, %gsr and %fprs into the user signal frame.  Only the register
+ * halves marked dirty in fpsaved[0] are copied.  Returns non-zero on
+ * any user-copy fault.
+ */
+static inline int
+save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = (unsigned long *)(regs+1);
+	unsigned long fprs;
+	int err = 0;
+	
+	fprs = current_thread_info()->fpsaved[0];
+	if (fprs & FPRS_DL)
+		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
+				    (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
+				    (sizeof(unsigned int) * 32));
+	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	err |= __put_user(fprs, &fpu->si_fprs);
+
+	return err;
+}
+
+/* Pick the user stack address for a new signal frame of 'framesize'
+ * bytes.  Starts from the (biased) frame pointer; with SA_ONSTACK it
+ * switches to the top of the alternate stack, but only when we are
+ * not already on it AND the alternate stack top is 8-byte aligned
+ * (the '& 7' test below) -- a misaligned altstack is silently ignored.
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
+{
+	unsigned long sp;
+
+	sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (!on_sig_stack(sp) &&
+		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+	return (void __user *)(sp - framesize);
+}
+
+/* Build a 64-bit rt signal frame on the user stack and redirect the
+ * task to the handler in 'ka'.  A bad frame pointer or leftover saved
+ * register windows kill the task with SIGILL; any fault while writing
+ * the frame forces SIGSEGV instead.
+ */
+static inline void
+setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+	       int signo, sigset_t *oldset, siginfo_t *info)
+{
+	struct rt_signal_frame __user *sf;
+	int sigframe_size, err;
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+	
+	/* Drop the FPU area from the frame when no FPU state is live. */
+	sigframe_size = RT_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct rt_signal_frame __user *)
+		get_sigframe(ka, regs, sigframe_size);
+	
+	if (invalid_frame_pointer (sf, sigframe_size))
+		goto sigill;
+
+	/* Any user windows still parked in the thread would be lost
+	 * across the frame switch, so refuse delivery in that case.
+	 */
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	err = copy_to_user(&sf->regs, regs, sizeof (*regs));
+
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
+		err |= save_fpu_state(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		/* NULL fpu_save tells sigreturn there is no FPU blob. */
+		err |= __put_user(0, &sf->fpu_save);
+	}
+	
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
+
+	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
+
+	/* Duplicate the caller's register window below the new frame. */
+	err |= copy_in_user((u64 __user *)sf,
+			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
+			    sizeof(struct reg_window));
+
+	if (info)
+		err |= copy_siginfo_to_user(&sf->info, info);
+	else {
+		err |= __put_user(signo, &sf->info.si_signo);
+		err |= __put_user(SI_NOINFO, &sf->info.si_code);
+	}
+	if (err)
+		goto sigsegv;
+	
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
+	regs->u_regs[UREG_I0] = signo;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+
+	/* The sigcontext is passed in this way because of how it
+	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
+	 * for sparc64.  It includes the 128 bytes of siginfo_t.
+	 */
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+	/* 5. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	/* 4. return to kernel instructions */
+	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signo, current);
+}
+
+/* Deliver one signal: push the rt frame (with siginfo only when the
+ * handler asked for SA_SIGINFO), then -- unless SA_NOMASK is set --
+ * block sa_mask plus the signal itself for the duration of the
+ * handler.
+ */
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
+				 siginfo_t *info,
+				 sigset_t *oldset, struct pt_regs *regs)
+{
+	setup_rt_frame(ka, regs, signr, oldset,
+		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+	if (!(ka->sa.sa_flags & SA_NOMASK)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,signr);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/* Decide how an interrupted system call interacts with this signal.
+ * On sparc64 a failing syscall leaves the POSITIVE error number in
+ * %i0 with the carry bits set, hence the un-negated ERESTART* values
+ * below.  Either convert the restart request to EINTR, or rewind
+ * tpc/tnpc by one instruction so the trap is re-executed.
+ */
+static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
+				     struct sigaction *sa)
+{
+	switch (regs->u_regs[UREG_I0]) {
+	case ERESTART_RESTARTBLOCK:
+	case ERESTARTNOHAND:
+	no_system_call_restart:
+		/* Handler installed: fail the call with EINTR. */
+		regs->u_regs[UREG_I0] = EINTR;
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		break;
+	case ERESTARTSYS:
+		if (!(sa->sa_flags & SA_RESTART))
+			goto no_system_call_restart;
+		/* fallthrough */
+	case ERESTARTNOINTR:
+		/* Replay: restore the original %o0 and back up one insn. */
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Main signal dispatch: dequeue one pending signal and deliver it,
+ * returning 1 if a handler frame was set up, 0 otherwise.  When no
+ * handler ran, any pending syscall-restart request is applied here
+ * (replay the trap, or route through sys_restart_syscall via %g1).
+ * 32-bit tasks are handed off to do_signal32().
+ */
+static int do_signal(sigset_t *oldset, struct pt_regs * regs,
+		     unsigned long orig_i0, int restart_syscall)
+{
+	siginfo_t info;
+	struct signal_deliver_cookie cookie;
+	struct k_sigaction ka;
+	int signr;
+	
+	/* The cookie lets ptrace_signal_deliver() redo restart handling
+	 * if the tracer intervenes.
+	 */
+	cookie.restart_syscall = restart_syscall;
+	cookie.orig_i0 = orig_i0;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+#ifdef CONFIG_SPARC32_COMPAT
+	if (test_thread_flag(TIF_32BIT)) {
+		extern int do_signal32(sigset_t *, struct pt_regs *,
+				       unsigned long, int);
+		return do_signal32(oldset, regs, orig_i0,
+				   cookie.restart_syscall);
+	}
+#endif	
+
+	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
+	if (signr > 0) {
+		if (cookie.restart_syscall)
+			syscall_restart(orig_i0, regs, &ka.sa);
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return 1;
+	}
+	if (cookie.restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cookie.orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	if (cookie.restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	return 0;
+}
+
+/* Entry point from the trap-return path: run signal delivery when
+ * TIF_SIGPENDING is set in the thread-info flags, otherwise do
+ * nothing.
+ */
+void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
+		      unsigned long orig_i0, int restart_syscall,
+		      unsigned long thread_info_flags)
+{
+	if (!(thread_info_flags & _TIF_SIGPENDING))
+		return;
+	do_signal(oldset, regs, orig_i0, restart_syscall);
+}
+
+/* Called from the ptrace signal path with the cookie that do_signal()
+ * filled in.  Applies the same syscall-restart fixups as do_signal(),
+ * and clears restart_syscall so they are not applied twice.
+ */
+void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
+{
+	struct signal_deliver_cookie *cp = cookie;
+
+	if (cp->restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cp->orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+		cp->restart_syscall = 0;
+	}
+	if (cp->restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+		cp->restart_syscall = 0;
+	}
+}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
new file mode 100644
index 0000000..859255c
--- /dev/null
+++ b/arch/sparc64/kernel/signal32.c
@@ -0,0 +1,1469 @@
+/*  $Id: signal32.c,v 1.74 2002/02/09 19:49:30 davem Exp $
+ *  arch/sparc64/kernel/signal32.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *  Copyright (C) 1997 Eddie C. Dost   (ecd@skynet.be)
+ *  Copyright (C) 1997,1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/smp_lock.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
+#include <asm/pgtable.h>
+#include <asm/psrcompat.h>
+#include <asm/fpumacro.h>
+#include <asm/visasm.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/* Forward declaration: the 32-bit signal delivery entry point, used
+ * by the sigsuspend/sigpause loops below before it is defined.
+ */
+int do_signal32(sigset_t *oldset, struct pt_regs *regs,
+		unsigned long orig_o0, int ret_from_syscall);
+
+/* Signal frames: the original one (compatible with SunOS):
+ *
+ * Set up a signal frame... Make the stack look the way SunOS
+ * expects it to look which is basically:
+ *
+ * ---------------------------------- <-- %sp at signal time
+ * Struct sigcontext
+ * Signal address
+ * Ptr to sigcontext area above
+ * Signal code
+ * The signal number itself
+ * One register window
+ * ---------------------------------- <-- New %sp
+ */
+struct signal_sframe32 {
+	struct reg_window32 sig_window;
+	int sig_num;
+	int sig_code;
+	/* struct sigcontext32 * */ u32 sig_scptr;
+	int sig_address;
+	struct sigcontext32 sig_context;
+	/* Upper words of the blocked-signal mask; word 0 lives in
+	 * sig_context.sigc_mask. */
+	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
+};
+
+/* This magic should be in g_upper[0] for all upper parts
+ * to be valid.
+ */
+#define SIGINFO_EXTRA_V8PLUS_MAGIC	0x130e269
+/* Upper 32 bits of the %g and %o registers plus the %asi field,
+ * which a 32-bit (v8plus) frame cannot otherwise carry.
+ */
+typedef struct {
+	unsigned int g_upper[8];
+	unsigned int o_upper[8];
+	unsigned int asi;
+} siginfo_extra_v8plus_t;
+
+/* 
+ * And the new one, intended to be used for Linux applications only
+ * (we have enough in there to work with clone).
+ * All the interesting bits are in the info field.
+ */
+struct new_signal_frame32 {
+	struct sparc_stackf32	ss;
+	__siginfo32_t		info;
+	/* __siginfo_fpu32_t * */ u32 fpu_save;
+	/* In-frame sigreturn trampoline (used when no ka_restorer). */
+	unsigned int		insns[2];
+	unsigned int		extramask[_COMPAT_NSIG_WORDS - 1];
+	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
+	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
+	siginfo_extra_v8plus_t	v8plus;
+	__siginfo_fpu_t		fpu_state;
+};
+
+/* 32-bit layout of siginfo_t as seen by compat user space: compat_*
+ * sized integers, and pointers narrowed to u32.
+ */
+typedef struct compat_siginfo{
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		int _pad[SI_PAD_SIZE32];
+
+		/* kill() */
+		struct {
+			compat_pid_t _pid;		/* sender's pid */
+			unsigned int _uid;		/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;			/* timer id */
+			int _overrun;			/* overrun count */
+			compat_sigval_t _sigval;		/* same as below */
+			int _sys_private;		/* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			compat_pid_t _pid;		/* sender's pid */
+			unsigned int _uid;		/* sender's uid */
+			compat_sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			compat_pid_t _pid;		/* which child */
+			unsigned int _uid;		/* sender's uid */
+			int _status;			/* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
+		struct {
+			u32 _addr; /* faulting insn/memory ref. */
+			int _trapno;
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+	} _sifields;
+}compat_siginfo_t;
+
+/* The 32-bit rt signal frame: full siginfo, pt_regs image, blocked
+ * mask and sigaltstack state, built by the rt delivery path.
+ */
+struct rt_signal_frame32 {
+	struct sparc_stackf32	ss;
+	compat_siginfo_t	info;
+	struct pt_regs32	regs;
+	compat_sigset_t		mask;
+	/* __siginfo_fpu32_t * */ u32 fpu_save;
+	/* In-frame sigreturn trampoline (used when no ka_restorer). */
+	unsigned int		insns[2];
+	stack_t32		stack;
+	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
+	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
+	siginfo_extra_v8plus_t	v8plus;
+	__siginfo_fpu_t		fpu_state;
+};
+
+/* Align macros: round each frame size up to the next 8-byte multiple. */
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe32) + 7) & (~7)))
+#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame32) + 7) & (~7)))
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+
+/* Translate a 64-bit kernel siginfo_t into the 32-bit user layout at
+ * 'to', copying only the union member selected by si_code (never the
+ * raw padding, except for user-generated si_code < 0).  Returns
+ * non-zero on any faulting user store, -EFAULT if 'to' is unwritable.
+ */
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+{
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	/* If you change siginfo_t structure, please be sure
+	   this code is fixed accordingly.
+	   It should never copy any pad contained in the structure
+	   to avoid security leaks, but must copy the generic
+	   3 ints plus the relevant union member.
+	   This routine must convert siginfo from 64bit to 32bit as well
+	   at the same time.  */
+	err = __put_user(from->si_signo, &to->si_signo);
+	err |= __put_user(from->si_errno, &to->si_errno);
+	err |= __put_user((short)from->si_code, &to->si_code);
+	if (from->si_code < 0)
+		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (from->si_code >> 16) {
+		case __SI_TIMER >> 16:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			err |= __put_user(from->si_int, &to->si_int);
+			break;
+		case __SI_CHLD >> 16:
+			err |= __put_user(from->si_utime, &to->si_utime);
+			err |= __put_user(from->si_stime, &to->si_stime);
+			err |= __put_user(from->si_status, &to->si_status);
+		/* Deliberate fallthrough: SIGCHLD also needs pid/uid,
+		 * which the default case supplies. */
+		default:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			break;
+		case __SI_FAULT >> 16:
+		case __SI_POLL >> 16:
+			err |= __put_user(from->si_trapno, &to->si_trapno);
+			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
+			break;
+		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			err |= __put_user(from->si_int, &to->si_int);
+			break;
+		}
+	}
+	return err;
+}
+
+/* CAUTION: This is just a very minimalist implementation for the
+ *          sake of compat_sys_rt_sigqueueinfo()
+ *
+ * Copies the three leading ints (si_signo, si_errno, si_code) plus
+ * the raw union padding from a 32-bit user siginfo into the kernel
+ * siginfo at 'to'.  Returns 0 on success, -EFAULT on a bad pointer.
+ */
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+	/* We only READ from 'from', so the correct access check is
+	 * VERIFY_READ (was VERIFY_WRITE, which needlessly demanded a
+	 * writable mapping), matching the generic compat code.
+	 */
+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	if (copy_from_user(to, from, 3*sizeof(int)) ||
+	    copy_from_user(to->_sifields._pad, from->_sifields._pad,
+			   SI_PAD_SIZE))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
+ *
+ * Implements 32-bit sigpause/sigsuspend semantics: install 'set'
+ * (minus SIGKILL/SIGSTOP), advance past the trap instruction, then
+ * sleep until do_signal32() delivers a handled signal; the syscall
+ * itself always completes with -EINTR (carry set, EINTR in %o0).
+ */
+asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
+{
+	sigset_t saveset;
+
+	set &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, set);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal32(&saveset, regs, 0, 0))
+			return;
+	}
+}
+
+/* 32-bit rt_sigsuspend: read a compat_sigset_t from user space at
+ * 'uset', widen it to the kernel sigset_t word size, install it
+ * (minus SIGKILL/SIGSTOP), skip the trap insn and sleep until
+ * do_signal32() delivers a handled signal.  Errors are reported
+ * sparc-style: positive errno in %o0 with the carry bit set.
+ */
+asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
+{
+	sigset_t oldset, set;
+	compat_sigset_t set32;
+        
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINVAL;
+		return;
+	}
+	if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EFAULT;
+		return;
+	}
+	/* Pair up 32-bit words into 64-bit sigset words (fallthrough
+	 * is intentional: each case fills one word). */
+	switch (_NSIG_WORDS) {
+	case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
+	case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
+	case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
+	case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal32(&oldset, regs, 0, 0))
+			return;
+	}
+}
+
+/* Reload FPU state from a 32-bit signal frame: disable the FPU
+ * (clear %fprs and TSTATE_PEF so it is lazily re-enabled), then pull
+ * back the register halves flagged in the frame's si_fprs, plus %fsr
+ * and %gsr.  Returns non-zero if any user-space read faulted.
+ */
+static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err;
+	
+	err = __get_user(fprs, &fpu->si_fprs);
+	fprs_write(0);
+	regs->tstate &= ~TSTATE_PEF;
+	if (fprs & FPRS_DL)
+		err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
+	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	current_thread_info()->fpsaved[0] |= fprs;
+	return err;
+}
+
+/* Return path for the "new" 32-bit signal frame: unwind the frame at
+ * %fp, restoring pc/npc, %y, the globals/ins (plus v8plus upper
+ * halves and %asi when the magic marker is present), condition codes,
+ * optional FPU state and the blocked-signal mask.  Any fault or
+ * inconsistency forces SIGSEGV.
+ */
+void do_new_sigreturn32(struct pt_regs *regs)
+{
+	struct new_signal_frame32 __user *sf;
+	unsigned int psr;
+	unsigned pc, npc, fpu_save;
+	sigset_t set;
+	unsigned seta[_COMPAT_NSIG_WORDS];
+	int err, i;
+	
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sf = (struct new_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+	    (((unsigned long) sf) & 3))
+		goto segv;
+
+	/* NOTE(review): the return values of these two user reads are
+	 * not checked; a fault here leaves pc/npc unset -- confirm
+	 * this is intended (access_ok was verified above). */
+	get_user(pc, &sf->info.si_regs.pc);
+	__get_user(npc, &sf->info.si_regs.npc);
+
+	if ((pc | npc) & 3)
+		goto segv;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+
+	/* 2. Restore the state */
+	err = __get_user(regs->y, &sf->info.si_regs.y);
+	err |= __get_user(psr, &sf->info.si_regs.psr);
+
+	for (i = UREG_G1; i <= UREG_I7; i++)
+		err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
+		/* v8plus frame: recover the upper register halves and
+		 * %asi, guarded by the magic word in g_upper[0]. */
+		err |= __get_user(i, &sf->v8plus.g_upper[0]);
+		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
+			unsigned long asi;
+
+			for (i = UREG_G1; i <= UREG_I7; i++)
+				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
+			err |= __get_user(asi, &sf->v8plus.asi);
+			regs->tstate &= ~TSTATE_ASI;
+			regs->tstate |= ((asi & 0xffUL) << 24UL);
+		}
+	}
+
+	/* User can only change condition codes in %tstate. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	if (fpu_save)
+		err |= restore_fpu_state32(regs, &sf->fpu_state);
+	err |= __get_user(seta[0], &sf->info.si_mask);
+	err |= copy_from_user(seta+1, &sf->extramask,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	if (err)
+	    	goto segv;
+	/* Widen the 32-bit mask words into the kernel sigset
+	 * (intentional case fallthrough). */
+	switch (_NSIG_WORDS) {
+		case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
+		case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
+		case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
+		case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* 32-bit sigreturn entry.  Tasks using the new-style frames are
+ * routed to do_new_sigreturn32(); otherwise unwinds the old
+ * SunOS-style sigcontext whose pointer was left in %o0.  Restores
+ * the blocked mask, pc/npc, %sp/%o0/%g1 and the condition codes.
+ * Any fault or bad pointer forces SIGSEGV.
+ */
+asmlinkage void do_sigreturn32(struct pt_regs *regs)
+{
+	struct sigcontext32 __user *scptr;
+	unsigned int pc, npc, psr;
+	sigset_t set;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack();
+	if (test_thread_flag(TIF_NEWSIGNALS)) {
+		do_new_sigreturn32(regs);
+		return;
+	}
+
+	scptr = (struct sigcontext32 __user *)
+		(regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
+	/* Check sanity of the user arg. */
+	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
+	    (((unsigned long) scptr) & 3))
+		goto segv;
+
+	err = __get_user(pc, &scptr->sigc_pc);
+	err |= __get_user(npc, &scptr->sigc_npc);
+
+	if ((pc | npc) & 3)
+		goto segv; /* Nice try. */
+
+	err |= __get_user(seta[0], &scptr->sigc_mask);
+	/* Note that scptr + 1 points to extramask */
+	err |= copy_from_user(seta+1, scptr + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	if (err)
+	    	goto segv;
+	/* Widen the 32-bit mask words into the kernel sigset
+	 * (intentional case fallthrough). */
+	switch (_NSIG_WORDS) {
+		case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
+		case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
+		case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
+		case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
+	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
+	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
+
+	/* User can only change condition codes in %tstate. */
+	err |= __get_user(psr, &scptr->sigc_psr);
+	if (err)
+		goto segv;
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+	return;
+
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* 32-bit rt_sigreturn: unwind the rt frame at %fp, restoring pc/npc,
+ * %y, the registers (with optional v8plus upper halves and %asi),
+ * condition codes, optional FPU state, the sigaltstack settings and
+ * the blocked-signal mask.  Any fault or inconsistency forces
+ * SIGSEGV.
+ */
+asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+{
+	struct rt_signal_frame32 __user *sf;
+	unsigned int psr, pc, npc, fpu_save, u_ss_sp;
+	mm_segment_t old_fs;
+	sigset_t set;
+	compat_sigset_t seta;
+	stack_t st;
+	int err, i;
+	
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack();
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+	    (((unsigned long) sf) & 3))
+		goto segv;
+
+	/* NOTE(review): the return values of these two user reads are
+	 * not checked; access_ok was verified above -- confirm this is
+	 * intended. */
+	get_user(pc, &sf->regs.pc);
+	__get_user(npc, &sf->regs.npc);
+
+	if ((pc | npc) & 3)
+		goto segv;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+
+	/* 2. Restore the state */
+	err = __get_user(regs->y, &sf->regs.y);
+	err |= __get_user(psr, &sf->regs.psr);
+	
+	for (i = UREG_G1; i <= UREG_I7; i++)
+		err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
+		/* v8plus frame: recover the upper register halves and
+		 * %asi, guarded by the magic word in g_upper[0]. */
+		err |= __get_user(i, &sf->v8plus.g_upper[0]);
+		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
+			unsigned long asi;
+
+			for (i = UREG_G1; i <= UREG_I7; i++)
+				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
+			err |= __get_user(asi, &sf->v8plus.asi);
+			regs->tstate &= ~TSTATE_ASI;
+			regs->tstate |= ((asi & 0xffUL) << 24UL);
+		}
+	}
+
+	/* User can only change condition codes in %tstate. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	if (fpu_save)
+		err |= restore_fpu_state32(regs, &sf->fpu_state);
+	err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
+	err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
+	st.ss_sp = compat_ptr(u_ss_sp);
+	err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
+	err |= __get_user(st.ss_size, &sf->stack.ss_size);
+	if (err)
+		goto segv;
+		
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
+	set_fs(old_fs);
+	
+	/* Widen the 32-bit mask words into the kernel sigset
+	 * (intentional case fallthrough). */
+	switch (_NSIG_WORDS) {
+		case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
+		case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
+		case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
+		case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Validate a prospective 32-bit signal-frame pointer: must be 8-byte
+ * aligned and the whole frame of 'fplen' bytes must fit below the
+ * 4GB boundary a compat task can address.  Returns 1 when invalid.
+ */
+static int invalid_frame_pointer(void __user *fp, int fplen)
+{
+	unsigned long addr = (unsigned long) fp;
+
+	if (addr & 7)
+		return 1;
+	if (addr > 0x100000000ULL - fplen)
+		return 1;
+	return 0;
+}
+
+/* Pick the user stack address for a new 32-bit signal frame of
+ * 'framesize' bytes.  The frame pointer is first truncated to 32
+ * bits (compat task, no stack bias).  With SA_ONSTACK, switches to
+ * the alternate stack only when not already on it AND its top is
+ * 8-byte aligned (the '& 7' test).
+ */
+static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
+{
+	unsigned long sp;
+	
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sp = regs->u_regs[UREG_FP];
+	
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (sa->sa_flags & SA_ONSTACK) {
+		if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+	return (void __user *)(sp - framesize);
+}
+
+/* Build the old SunOS-compatible 32-bit signal frame and point the
+ * task at the handler.  Translates the kernel siginfo into the
+ * legacy SunOS (sig_address, sig_code) pair.  A bad frame pointer
+ * kills the task with SIGILL; a fault while writing the frame forces
+ * SIGSEGV.
+ */
+static void
+setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
+{
+	struct signal_sframe32 __user *sframep;
+	struct sigcontext32 __user *sc;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+	int err = 0;
+	void __user *sig_address;
+	int sig_code;
+	unsigned long pc = regs->tpc;
+	unsigned long npc = regs->tnpc;
+	unsigned int psr;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	sframep = (struct signal_sframe32 __user *)
+		get_sigframe(sa, regs, SF_ALIGNEDSZ);
+	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
+		/* Don't change signal code and address, so that
+		 * post mortem debuggers can have a look.
+		 */
+		do_exit(SIGILL);
+	}
+
+	sc = &sframep->sig_context;
+
+	/* We've already made sure frame pointer isn't in kernel space... */
+	err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
+			 &sc->sigc_onstack);
+	
+	/* Split each 64-bit sigset word into two 32-bit words
+	 * (intentional case fallthrough). */
+	switch (_NSIG_WORDS) {
+	case 4: seta[7] = (oldset->sig[3] >> 32);
+	        seta[6] = oldset->sig[3];
+	case 3: seta[5] = (oldset->sig[2] >> 32);
+	        seta[4] = oldset->sig[2];
+	case 2: seta[3] = (oldset->sig[1] >> 32);
+	        seta[2] = oldset->sig[1];
+	case 1: seta[1] = (oldset->sig[0] >> 32);
+	        seta[0] = oldset->sig[0];
+	}
+	err |= __put_user(seta[0], &sc->sigc_mask);
+	err |= __copy_to_user(sframep->extramask, seta + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
+	err |= __put_user(pc, &sc->sigc_pc);
+	err |= __put_user(npc, &sc->sigc_npc);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sc->sigc_psr);
+	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
+	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
+	err |= __put_user(get_thread_wsaved(), &sc->sigc_oswins);
+
+	/* Duplicate the caller's register window below the new frame. */
+	err |= copy_in_user((u32 __user *)sframep,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+		       
+	set_thread_wsaved(0); /* So process is allowed to execute. */
+	err |= __put_user(signr, &sframep->sig_num);
+	sig_address = NULL;
+	sig_code = 0;
+	/* Map kernel-generated fault siginfo onto the legacy SunOS
+	 * SUBSIG_* codes expected in this frame format. */
+	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
+		sig_address = info->si_addr;
+		switch (signr) {
+		case SIGSEGV:
+			switch (info->si_code) {
+			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
+			default: sig_code = SUBSIG_PROTECTION; break;
+			}
+			break;
+		case SIGILL:
+			switch (info->si_code) {
+			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
+			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
+			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
+			default: sig_code = SUBSIG_STACK; break;
+			}
+			break;
+		case SIGFPE:
+			switch (info->si_code) {
+			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
+			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
+			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
+			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
+			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
+			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
+			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
+			default: sig_code = SUBSIG_FPERROR; break;
+			}
+			break;
+		case SIGBUS:
+			switch (info->si_code) {
+			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
+			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
+			default: sig_code = SUBSIG_BUSTIMEOUT; break;
+			}
+			break;
+		case SIGEMT:
+			switch (info->si_code) {
+			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
+			}
+			break;
+		case SIGSYS:
+			if (info->si_code == (__SI_FAULT|0x100)) {
+				/* See sys_sunos32.c */
+				sig_code = info->si_trapno;
+				break;
+			}
+		default:
+			sig_address = NULL;
+		}
+	}
+	err |= __put_user(ptr_to_compat(sig_address), &sframep->sig_address);
+	err |= __put_user(sig_code, &sframep->sig_code);
+	err |= __put_user(ptr_to_compat(sc), &sframep->sig_scptr);
+	if (err)
+		goto sigsegv;
+
+	regs->u_regs[UREG_FP] = (unsigned long) sframep;
+	regs->tpc = (unsigned long) sa->sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	return;
+
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+
+/* Copy the task's saved FPU state (from the thread-info fpregs save
+ * area) out to a 32-bit user signal frame: the lower/upper register
+ * halves selected by FPRS_DL/FPRS_DU, plus %fsr, %gsr and %fprs.
+ * Returns non-zero if any user-space store faulted.
+ */
+static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err = 0;
+	
+	fprs = current_thread_info()->fpsaved[0];
+	if (fprs & FPRS_DL)
+		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
+				    (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
+				    (sizeof(unsigned int) * 32));
+	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	err |= __put_user(fprs, &fpu->si_fprs);
+
+	return err;
+}
+
+/* Build a "new-style" 32-bit signal frame on the user stack and redirect
+ * the task to its handler.  Used for compat tasks with TIF_NEWSIGNALS set
+ * when the handler was not installed with SA_SIGINFO.  On an unusable
+ * stack the task is killed with SIGILL; on a fault writing the frame it
+ * gets a forced SIGSEGV.
+ */
+static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+			      int signo, sigset_t *oldset)
+{
+	struct new_signal_frame32 __user *sf;
+	int sigframe_size;
+	u32 psr;
+	int i, err;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+	
+	/* Shrink the frame if there is no FPU state to dump. */
+	sigframe_size = NF_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct new_signal_frame32 __user *)
+		get_sigframe(&ka->sa, regs, sigframe_size);
+	
+	if (invalid_frame_pointer(sf, sigframe_size))
+		goto sigill;
+
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err  = put_user(regs->tpc, &sf->info.si_regs.pc);
+	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
+	err |= __put_user(regs->y, &sf->info.si_regs.y);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sf->info.si_regs.psr);
+	for (i = 0; i < 16; i++)
+		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+	/* v8plus extra area: upper 32 bits of the globals plus the ASI. */
+	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+	for (i = 1; i < 16; i++)
+		err |= __put_user(((u32 *)regs->u_regs)[2*i],
+				  &sf->v8plus.g_upper[i]);
+	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+			  &sf->v8plus.asi);
+
+	if (psr & PSR_EF) {
+		err |= save_fpu_state32(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		err |= __put_user(0, &sf->fpu_save);
+	}
+
+	/* Split each 64-bit sigmask word into two 32-bit halves for the
+	 * compat frame.  The missing breaks are intentional: each case
+	 * falls through to also store the lower-numbered words.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: seta[7] = (oldset->sig[3] >> 32);
+	        seta[6] = oldset->sig[3];
+	case 3: seta[5] = (oldset->sig[2] >> 32);
+	        seta[4] = oldset->sig[2];
+	case 2: seta[3] = (oldset->sig[1] >> 32);
+	        seta[2] = oldset->sig[1];
+	case 1: seta[1] = (oldset->sig[0] >> 32);
+	        seta[0] = oldset->sig[0];
+	}
+	err |= __put_user(seta[0], &sf->info.si_mask);
+	err |= __copy_to_user(sf->extramask, seta + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+
+	/* Copy the caller's current register window to the new frame. */
+	err |= copy_in_user((u32 __user *)sf,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+	
+	if (err)
+		goto sigsegv;
+
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = (unsigned long) sf;
+	regs->u_regs[UREG_I0] = signo;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+	/* 4. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* 5. return to kernel instructions */
+	if (ka->ka_restorer) {
+		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	} else {
+		/* Flush instruction space. */
+		unsigned long address = ((unsigned long)&(sf->insns[0]));
+		pgd_t *pgdp = pgd_offset(current->mm, address);
+		pud_t *pudp = pud_offset(pgdp, address);
+		pmd_t *pmdp = pmd_offset(pudp, address);
+		pte_t *ptep;
+
+		/* %i7 is a call address; the return lands 8 bytes past it,
+		 * i.e. exactly at sf->insns[0].
+		 */
+		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+	
+		err  = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
+		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
+		if (err)
+			goto sigsegv;
+
+		/* Flush the trampoline out of the I-cache so the cpu does
+		 * not execute stale instructions at that address.
+		 */
+		preempt_disable();
+		ptep = pte_offset_map(pmdp, address);
+		if (pte_present(*ptep)) {
+			unsigned long page = (unsigned long)
+				page_address(pte_page(*ptep));
+
+			__asm__ __volatile__(
+			"	membar	#StoreStore\n"
+			"	flush	%0 + %1"
+			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
+			: "memory");
+		}
+		pte_unmap(ptep);
+		preempt_enable();
+	}
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signo, current);
+}
+
+/* Setup a Solaris (SVR4) stack frame: lay down siginfo, a ucontext and a
+ * gwindows area on the user stack, then redirect the task to sa->sa_handler.
+ * Faults while writing the frame force a SIGSEGV.
+ */
+static void
+setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
+		   struct pt_regs *regs, int signr, sigset_t *oldset)
+{
+	svr4_signal_frame_t __user *sfp;
+	svr4_gregset_t  __user *gr;
+	svr4_siginfo_t  __user *si;
+	svr4_mcontext_t __user *mc;
+	svr4_gwindows_t __user *gw;
+	svr4_ucontext_t __user *uc;
+	svr4_sigset_t setv;
+	unsigned int psr;
+	int i, err;
+
+	synchronize_user_stack();
+	save_and_clear_fpu();
+	
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sfp = (svr4_signal_frame_t __user *)
+		get_sigframe(sa, regs,
+			     sizeof(struct reg_window32) + SVR4_SF_ALIGNED);
+
+	if (invalid_frame_pointer(sfp, sizeof(*sfp)))
+		do_exit(SIGILL);
+
+	/* Start with a clean frame pointer and fill it */
+	err = clear_user(sfp, sizeof(*sfp));
+
+	/* Setup convenience variables */
+	si = &sfp->si;
+	uc = &sfp->uc;
+	gw = &sfp->gw;
+	mc = &uc->mcontext;
+	gr = &mc->greg;
+	
+	/* FIXME: where am I supposed to put this?
+	 * sc->sigc_onstack = old_status;
+	 * anyways, it does not look like it is used for anything at all.
+	 */
+	/* Split the 64-bit sigmask words into 32-bit SVR4 sigbits. */
+	setv.sigbits[0] = oldset->sig[0];
+	setv.sigbits[1] = (oldset->sig[0] >> 32);
+	if (_NSIG_WORDS >= 2) {
+		setv.sigbits[2] = oldset->sig[1];
+		setv.sigbits[3] = (oldset->sig[1] >> 32);
+		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
+	} else
+		err |= __copy_to_user(&uc->sigmask, &setv,
+				      2 * sizeof(unsigned int));
+	
+	/* Store registers */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __put_user(regs->tpc, &((*gr)[SVR4_PC]));
+	err |= __put_user(regs->tnpc, &((*gr)[SVR4_NPC]));
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &((*gr)[SVR4_PSR]));
+	err |= __put_user(regs->y, &((*gr)[SVR4_Y]));
+	
+	/* Copy g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
+	err |= __put_user(current->sas_ss_size, &uc->stack.size);
+
+	/* Save the currently window file: */
+
+	/* 1. Link sfp->uc->gwins to our windows */
+	err |= __put_user(ptr_to_compat(gw), &mc->gwin);
+	    
+	/* 2. Number of windows to restore at setcontext (): */
+	err |= __put_user(get_thread_wsaved(), &gw->count);
+
+	/* 3. We just pay attention to the gw->count field on setcontext */
+	set_thread_wsaved(0); /* So process is allowed to execute. */
+
+	/* Setup the signal information.  Solaris expects a bunch of
+	 * information to be passed to the signal handler, we don't provide
+	 * that much currently, should use siginfo.
+	 */
+	err |= __put_user(signr, &si->siginfo.signo);
+	err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
+	if (err)
+		goto sigsegv;
+
+	regs->u_regs[UREG_FP] = (unsigned long) sfp;
+	regs->tpc = (unsigned long) sa->sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Arguments passed to signal handler */
+	if (regs->u_regs[14]){
+		struct reg_window32 __user *rw = (struct reg_window32 __user *)
+			(regs->u_regs[14] & 0x00000000ffffffffUL);
+
+		/* Store signr/si/uc into the ins of the caller's window so
+		 * the handler sees them, and chain the frame pointer.
+		 */
+		err |= __put_user(signr, &rw->ins[0]);
+		err |= __put_user((u64)si, &rw->ins[1]);
+		err |= __put_user((u64)uc, &rw->ins[2]);
+		err |= __put_user((u64)sfp, &rw->ins[6]);	/* frame pointer */
+		if (err)
+			goto sigsegv;
+
+		regs->u_regs[UREG_I0] = signr;
+		regs->u_regs[UREG_I1] = (u32)(u64) si;
+		regs->u_regs[UREG_I2] = (u32)(u64) uc;
+	}
+	return;
+
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+/* Solaris getcontext() emulation: dump the current blocked mask, user
+ * registers and sigaltstack state into the user-supplied ucontext.
+ * Returns 0 on success, -EFAULT on any user-copy failure.
+ */
+asmlinkage int
+svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
+{
+	svr4_gregset_t  __user *gr;
+	svr4_mcontext_t __user *mc;
+	svr4_sigset_t setv;
+	int i, err;
+	u32 psr;
+
+	synchronize_user_stack();
+	save_and_clear_fpu();
+	
+	if (get_thread_wsaved())
+		do_exit(SIGSEGV);
+
+	err = clear_user(uc, sizeof(*uc));
+
+	/* Setup convenience variables */
+	mc = &uc->mcontext;
+	gr = &mc->greg;
+
+	/* Split the 64-bit blocked mask into 32-bit SVR4 sigbits. */
+	setv.sigbits[0] = current->blocked.sig[0];
+	setv.sigbits[1] = (current->blocked.sig[0] >> 32);
+	if (_NSIG_WORDS >= 2) {
+		setv.sigbits[2] = current->blocked.sig[1];
+		setv.sigbits[3] = (current->blocked.sig[1] >> 32);
+		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
+	} else
+		err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned));
+
+	/* Store registers */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __put_user(regs->tpc, &uc->mcontext.greg[SVR4_PC]);
+	err |= __put_user(regs->tnpc, &uc->mcontext.greg[SVR4_NPC]);
+
+	psr = tstate_to_psr(regs->tstate) & ~PSR_EF;		   
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &uc->mcontext.greg[SVR4_PSR]);
+
+	err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);
+	
+	/* Copy g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
+	err |= __put_user(current->sas_ss_size, &uc->stack.size);
+
+	/* The register file is not saved
+	 * we have already stuffed all of it with sync_user_stack
+	 */
+	return (err ? -EFAULT : 0);
+}
+
+
+/* Set the context for a svr4 application, this is Solaris way to sigreturn.
+ * Validates the user-supplied ucontext, restores the signal mask, the
+ * sigaltstack, PC/nPC and the g/o registers.  Returns -EINTR on success
+ * (the interrupted syscall semantics) or -EFAULT on a bad ucontext.
+ */
+asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
+{
+	svr4_gregset_t  __user *gr;
+	mm_segment_t old_fs;
+	u32 pc, npc, psr, u_ss_sp;
+	sigset_t set;
+	svr4_sigset_t setv;
+	int i, err;
+	stack_t st;
+	
+	/* Fixme: restore windows, or is this already taken care of in
+	 * svr4_setup_frame when sync_user_windows is done?
+	 */
+	flush_user_windows();
+	
+	if (get_thread_wsaved())
+		goto sigsegv;
+
+	if (((unsigned long) c) & 3){
+		printk("Unaligned structure passed\n");
+		goto sigsegv;
+	}
+
+	if (!__access_ok(c, sizeof(*c))) {
+		/* Miguel, add nice debugging msg _here_. ;-) */
+		goto sigsegv;
+	}
+
+	/* Check for valid PC and nPC */
+	gr = &c->mcontext.greg;
+	err = __get_user(pc, &((*gr)[SVR4_PC]));
+	err |= __get_user(npc, &((*gr)[SVR4_NPC]));
+	if ((pc | npc) & 3)
+		goto sigsegv;
+	
+	/* Retrieve information from the passed ucontext.  Note that the
+	 * nPC installed below is OR-ed with 1; this tells entry.S not to
+	 * touch our PC and nPC.
+	 */
+	err |= copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));
+	set.sig[0] = setv.sigbits[0] | (((long)setv.sigbits[1]) << 32);
+	if (_NSIG_WORDS >= 2)
+		set.sig[1] = setv.sigbits[2] | (((long)setv.sigbits[3]) << 32);
+	
+	err |= __get_user(u_ss_sp, &c->stack.sp);
+	st.ss_sp = compat_ptr(u_ss_sp);
+	err |= __get_user(st.ss_flags, &c->stack.flags);
+	err |= __get_user(st.ss_size, &c->stack.size);
+	if (err)
+		goto sigsegv;
+		
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	/* st lives on the kernel stack, so temporarily widen the
+	 * addressing limit for the do_sigaltstack() call.
+	 */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((stack_t __user *) &st, NULL, regs->u_regs[UREG_I6]);
+	set_fs(old_fs);
+	
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->tpc = pc;
+	regs->tnpc = npc | 1;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
+	err |= __get_user(psr, &((*gr)[SVR4_PSR]));
+	/* Only the condition codes are taken from the user PSR. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	/* Restore g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+	if (err)
+		goto sigsegv;
+
+	return -EINTR;
+sigsegv:
+	return -EFAULT;
+}
+
+/* Build an RT (SA_SIGINFO) 32-bit signal frame on the user stack and
+ * redirect the task to its handler.  Like new_setup_frame32() but also
+ * copies the full siginfo and sigaltstack state into the frame.  On an
+ * unusable stack the task is killed with SIGILL; on a fault writing the
+ * frame it gets a forced SIGSEGV.
+ */
+static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+			     unsigned long signr, sigset_t *oldset,
+			     siginfo_t *info)
+{
+	struct rt_signal_frame32 __user *sf;
+	int sigframe_size;
+	u32 psr;
+	int i, err;
+	compat_sigset_t seta;
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+	
+	/* Shrink the frame if there is no FPU state to dump. */
+	sigframe_size = RT_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct rt_signal_frame32 __user *)
+		get_sigframe(&ka->sa, regs, sigframe_size);
+	
+	if (invalid_frame_pointer(sf, sigframe_size))
+		goto sigill;
+
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err  = put_user(regs->tpc, &sf->regs.pc);
+	err |= __put_user(regs->tnpc, &sf->regs.npc);
+	err |= __put_user(regs->y, &sf->regs.y);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sf->regs.psr);
+	for (i = 0; i < 16; i++)
+		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+	/* v8plus extra area: upper 32 bits of the globals plus the ASI. */
+	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+	for (i = 1; i < 16; i++)
+		err |= __put_user(((u32 *)regs->u_regs)[2*i],
+				  &sf->v8plus.g_upper[i]);
+	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+			  &sf->v8plus.asi);
+
+	if (psr & PSR_EF) {
+		err |= save_fpu_state32(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		err |= __put_user(0, &sf->fpu_save);
+	}
+
+	/* Update the siginfo structure.  */
+	err |= copy_siginfo_to_user32(&sf->info, info);
+	
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
+
+	/* Split each 64-bit sigmask word into two 32-bit halves; the
+	 * missing breaks are intentional fallthrough.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
+		seta.sig[6] = oldset->sig[3];
+	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
+		seta.sig[4] = oldset->sig[2];
+	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
+		seta.sig[2] = oldset->sig[1];
+	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
+		seta.sig[0] = oldset->sig[0];
+	}
+	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
+
+	/* Copy the caller's current register window to the new frame. */
+	err |= copy_in_user((u32 __user *)sf,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+	if (err)
+		goto sigsegv;
+	
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = (unsigned long) sf;
+	regs->u_regs[UREG_I0] = signr;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
+
+	/* 4. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* 5. return to kernel instructions */
+	if (ka->ka_restorer)
+		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	else {
+		/* Flush instruction space. */
+		unsigned long address = ((unsigned long)&(sf->insns[0]));
+		pgd_t *pgdp = pgd_offset(current->mm, address);
+		pud_t *pudp = pud_offset(pgdp, address);
+		pmd_t *pmdp = pmd_offset(pudp, address);
+		pte_t *ptep;
+
+		/* %i7 is a call address; the return lands 8 bytes past it,
+		 * i.e. exactly at sf->insns[0].
+		 */
+		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+	
+		/* mov __NR_rt_sigreturn, %g1 */
+		err |= __put_user(0x82102065, &sf->insns[0]);
+
+		/* t 0x10 */
+		err |= __put_user(0x91d02010, &sf->insns[1]);
+		if (err)
+			goto sigsegv;
+
+		/* Flush the trampoline out of the I-cache. */
+		preempt_disable();
+		ptep = pte_offset_map(pmdp, address);
+		if (pte_present(*ptep)) {
+			unsigned long page = (unsigned long)
+				page_address(pte_page(*ptep));
+
+			__asm__ __volatile__(
+			"	membar	#StoreStore\n"
+			"	flush	%0 + %1"
+			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
+			: "memory");
+		}
+		pte_unmap(ptep);
+		preempt_enable();
+	}
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+/* Deliver one signal to the current compat task: build the user-space
+ * signal frame in whichever format applies, then update the blocked
+ * signal mask.
+ */
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
+				   siginfo_t *info,
+				   sigset_t *oldset, struct pt_regs *regs,
+				   int svr4_signal)
+{
+	/* Pick the frame layout the handler expects. */
+	if (svr4_signal) {
+		setup_svr4_frame32(&ka->sa, regs->tpc, regs->tnpc,
+				   regs, signr, oldset);
+	} else if (ka->sa.sa_flags & SA_SIGINFO) {
+		setup_rt_frame32(ka, regs, signr, oldset, info);
+	} else if (test_thread_flag(TIF_NEWSIGNALS)) {
+		new_setup_frame32(ka, regs, signr, oldset);
+	} else {
+		setup_frame32(&ka->sa, regs, signr, oldset, info);
+	}
+
+	/* Unless SA_NOMASK was requested, block sa_mask plus the signal
+	 * itself while the handler runs.
+	 */
+	if (!(ka->sa.sa_flags & SA_NOMASK)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+		sigaddset(&current->blocked, signr);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/* Decide how an interrupted syscall resumes after signal delivery.
+ * %o0 holds the (positive) error code at this point; sparc reports
+ * syscall failure via the carry bit plus a positive errno in %o0.
+ * Backing tpc/tnpc up by 4 replays the trap instruction.
+ */
+static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
+				     struct sigaction *sa)
+{
+	switch (regs->u_regs[UREG_I0]) {
+	case ERESTART_RESTARTBLOCK:
+	case ERESTARTNOHAND:
+	no_system_call_restart:
+		/* Fail the syscall with EINTR instead of restarting. */
+		regs->u_regs[UREG_I0] = EINTR;
+		regs->tstate |= TSTATE_ICARRY;
+		break;
+	case ERESTARTSYS:
+		if (!(sa->sa_flags & SA_RESTART))
+			goto no_system_call_restart;
+		/* fallthrough */
+	case ERESTARTNOINTR:
+		/* Restore the original first argument and replay the trap. */
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+/* Main compat signal-delivery loop: dequeue one pending signal and
+ * deliver it, or arrange for an interrupted syscall to be restarted.
+ * Returns 1 if a signal was delivered, 0 otherwise.
+ */
+int do_signal32(sigset_t *oldset, struct pt_regs * regs,
+		unsigned long orig_i0, int restart_syscall)
+{
+	siginfo_t info;
+	struct signal_deliver_cookie cookie;
+	struct k_sigaction ka;
+	int signr;
+	int svr4_signal = current->personality == PER_SVR4;
+	
+	cookie.restart_syscall = restart_syscall;
+	cookie.orig_i0 = orig_i0;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
+	if (signr > 0) {
+		if (cookie.restart_syscall)
+			syscall_restart32(orig_i0, regs, &ka.sa);
+		handle_signal32(signr, &ka, &info, oldset,
+				regs, svr4_signal);
+		return 1;
+	}
+	/* No handler ran: restart the interrupted syscall ourselves. */
+	if (cookie.restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cookie.orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	if (cookie.restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		/* Redirect to sys_restart_syscall via %g1. */
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	return 0;
+}
+
+/* 32-bit view of the old SunOS-style sigstack structure. */
+struct sigstack32 {
+	u32 the_stack;	/* user address of the signal stack */
+	int cur_status;	/* non-zero if currently on the signal stack */
+};
+
+/* Compat sigstack(2): report and/or install an alternate signal stack.
+ * Returns 0 on success, -EFAULT on bad user pointers, -EPERM when
+ * attempting to change the stack while running on it.
+ */
+asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
+{
+	struct sigstack32 __user *ssptr =
+		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
+	struct sigstack32 __user *ossptr =
+		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
+	int ret = -EFAULT;
+
+	/* First see if old state is wanted. */
+	if (ossptr) {
+		if (put_user(current->sas_ss_sp + current->sas_ss_size,
+			     &ossptr->the_stack) ||
+		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
+			goto out;
+	}
+	
+	/* Now see if we want to update the new state. */
+	if (ssptr) {
+		u32 ss_sp;
+
+		if (get_user(ss_sp, &ssptr->the_stack))
+			goto out;
+
+		/* If the current stack was set with sigaltstack, don't
+		 * swap stacks while we are on it.
+		 */
+		ret = -EPERM;
+		if (current->sas_ss_sp && on_sig_stack(sp))
+			goto out;
+			
+		/* Since we don't know the extent of the stack, and we don't
+		 * track onstack-ness, but rather calculate it, we must
+		 * presume a size.  Ho hum this interface is lossy.
+		 */
+		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
+		current->sas_ss_size = SIGSTKSZ;
+	}
+	
+	ret = 0;
+out:
+	return ret;
+}
+
+/* Compat sigaltstack(2): translate the 32-bit stack_t to/from the native
+ * one and forward to do_sigaltstack().  Returns 0 or a negative errno.
+ */
+asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
+{
+	stack_t uss, uoss;
+	u32 u_ss_sp = 0;
+	int ret;
+	mm_segment_t old_fs;
+	stack_t32 __user *uss32 = compat_ptr(ussa);
+	stack_t32 __user *uoss32 = compat_ptr(uossa);
+	
+	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
+		    __get_user(uss.ss_flags, &uss32->ss_flags) ||
+		    __get_user(uss.ss_size, &uss32->ss_size)))
+		return -EFAULT;
+	uss.ss_sp = compat_ptr(u_ss_sp);
+	/* uss/uoss live on the kernel stack: widen the addressing limit
+	 * so do_sigaltstack()'s user accessors accept them.
+	 */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
+			     uossa ? (stack_t __user *) &uoss : NULL, sp);
+	set_fs(old_fs);
+	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
+		    __put_user(uoss.ss_flags, &uoss32->ss_flags) ||
+		    __put_user(uoss.ss_size, &uoss32->ss_size)))
+		return -EFAULT;
+	return ret;
+}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
new file mode 100644
index 0000000..6dff06a
--- /dev/null
+++ b/arch/sparc64/kernel/smp.c
@@ -0,0 +1,1244 @@
+/* smp.c: Sparc64 SMP support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/profile.h>
+#include <linux/bootmem.h>
+
+#include <asm/head.h>
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/cpudata.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/timer.h>
+#include <asm/starfire.h>
+#include <asm/tlb.h>
+
+extern int linux_num_cpus;
+extern void calibrate_delay(void);
+
+/* Please don't make this stuff initdata!!!  --DaveM */
+static unsigned char boot_cpu_id;
+
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+static cpumask_t smp_commenced_mask;
+static cpumask_t cpu_callout_map;
+
+/* /proc/cpuinfo helper: report the online state of every cpu. */
+void smp_info(struct seq_file *m)
+{
+	int cpu;
+
+	seq_printf(m, "State:\n");
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu))
+			continue;
+		seq_printf(m,
+			   "CPU%d:\t\tonline\n", cpu);
+	}
+}
+
+/* /proc/cpuinfo helper: report BogoMIPS and clock tick per online cpu. */
+void smp_bogo(struct seq_file *m)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu))
+			continue;
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n"
+			   "Cpu%dClkTck\t: %016lx\n",
+			   cpu, cpu_data(cpu).udelay_val / (500000/HZ),
+			   (cpu_data(cpu).udelay_val / (5000/HZ)) % 100,
+			   cpu, cpu_data(cpu).clock_tick);
+	}
+}
+
+/* Record per-cpu data (delay calibration, clock tick from the PROM) and
+ * reset the per-cpu page-table caches for cpu 'id'.
+ */
+void __init smp_store_cpu_info(int id)
+{
+	int cpu_node;
+
+	/* multiplier and counter set by
+	   smp_setup_percpu_timer()  */
+	cpu_data(id).udelay_val			= loops_per_jiffy;
+
+	/* Read the cpu's clock frequency from its PROM node. */
+	cpu_find_by_mid(id, &cpu_node);
+	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
+						     "clock-frequency", 0);
+
+	cpu_data(id).pgcache_size		= 0;
+	cpu_data(id).pte_cache[0]		= NULL;
+	cpu_data(id).pte_cache[1]		= NULL;
+	cpu_data(id).pgd_cache			= NULL;
+	cpu_data(id).idle_volume		= 1;
+}
+
+static void smp_setup_percpu_timer(void);
+
+static volatile unsigned long callin_flag = 0;
+
+extern void inherit_locked_prom_mappings(int save_p);
+
+/* Load this cpu's per-cpu offset into %g5 and stash it in the IMMU
+ * TSB_REG alternate space so trap handlers can recover it.
+ */
+static inline void cpu_setup_percpu_base(unsigned long cpu_id)
+{
+	__asm__ __volatile__("mov	%0, %%g5\n\t"
+			     "stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (__per_cpu_offset(cpu_id)),
+			       "r" (TSB_REG), "i" (ASI_IMMU));
+}
+
+/* Entry point for a freshly started secondary cpu: set up MMU state,
+ * per-cpu base and timer, calibrate, signal the boot cpu via callin_flag,
+ * then spin until released by smp_commenced_mask and mark itself online.
+ */
+void __init smp_callin(void)
+{
+	int cpuid = hard_smp_processor_id();
+
+	inherit_locked_prom_mappings(0);
+
+	__flush_tlb_all();
+
+	cpu_setup_percpu_base(cpuid);
+
+	smp_setup_percpu_timer();
+
+	local_irq_enable();
+
+	calibrate_delay();
+	smp_store_cpu_info(cpuid);
+	/* Tell smp_boot_one_cpu() we made it. */
+	callin_flag = 1;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "flush  %%g6" : : : "memory");
+
+	/* Clear this or we will die instantly when we
+	 * schedule back to this idler...
+	 */
+	clear_thread_flag(TIF_NEWCHILD);
+
+	/* Attach to the address space of init_task. */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+
+	/* Busy-wait until the boot cpu releases us. */
+	while (!cpu_isset(cpuid, smp_commenced_mask))
+		membar("#LoadLoad");
+
+	cpu_set(cpuid, cpu_online_map);
+}
+
+/* An idle thread must never return from cpu_idle(); if it does, panic. */
+void cpu_panic(void)
+{
+	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
+	panic("SMP bolixed\n");
+}
+
+static unsigned long current_tick_offset;
+
+/* This tick register synchronization scheme is taken entirely from
+ * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
+ *
+ * The only change I've made is to rework it so that the master
+ * initiates the synchronization instead of the slave. -DaveM
+ */
+
+#define MASTER	0
+#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
+
+#define NUM_ROUNDS	64	/* magic value */
+#define NUM_ITERS	5	/* likewise */
+
+static DEFINE_SPINLOCK(itc_sync_lock);
+static unsigned long go[SLAVE + 1];
+
+#define DEBUG_TICK_SYNC	0
+
+/* One tick-sync measurement round on the slave: ping the master via the
+ * go[] flags NUM_ITERS times, keep the iteration with the smallest
+ * round-trip, and return the estimated offset of the master's tick from
+ * the midpoint of ours.  *rt gets the best round-trip time, *master the
+ * master's timestamp from that iteration.
+ */
+static inline long get_delta (long *rt, long *master)
+{
+	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
+	unsigned long tcenter, t0, t1, tm;
+	unsigned long i;
+
+	for (i = 0; i < NUM_ITERS; i++) {
+		t0 = tick_ops->get_tick();
+		go[MASTER] = 1;
+		membar("#StoreLoad");
+		while (!(tm = go[SLAVE]))
+			membar("#LoadLoad");
+		go[SLAVE] = 0;
+		membar("#StoreStore");
+		t1 = tick_ops->get_tick();
+
+		/* Keep the sample with the tightest round-trip window. */
+		if (t1 - t0 < best_t1 - best_t0)
+			best_t0 = t0, best_t1 = t1, best_tm = tm;
+	}
+
+	*rt = best_t1 - best_t0;
+	*master = best_tm - best_t0;
+
+	/* average best_t0 and best_t1 without overflow: */
+	tcenter = (best_t0/2 + best_t1/2);
+	if (best_t0 % 2 + best_t1 % 2 == 2)
+		tcenter++;
+	return tcenter - best_tm;
+}
+
+/* Slave side of tick-register synchronization: run NUM_ROUNDS measurement
+ * rounds against the master (see get_delta()) and adjust our tick register
+ * until the measured offset reaches zero, damping with a running latency
+ * estimate.  Protocol handshaking happens through the go[] array.
+ */
+void smp_synchronize_tick_client(void)
+{
+	long i, delta, adj, adjust_latency = 0, done = 0;
+	unsigned long flags, rt, master_time_stamp, bound;
+#if DEBUG_TICK_SYNC
+	struct {
+		long rt;	/* roundtrip time */
+		long master;	/* master's timestamp */
+		long diff;	/* difference between midpoint and master's timestamp */
+		long lat;	/* estimate of itc adjustment latency */
+	} t[NUM_ROUNDS];
+#endif
+
+	/* Signal readiness, then wait for the master to start the rounds. */
+	go[MASTER] = 1;
+
+	while (go[MASTER])
+		membar("#LoadLoad");
+
+	local_irq_save(flags);
+	{
+		for (i = 0; i < NUM_ROUNDS; i++) {
+			delta = get_delta(&rt, &master_time_stamp);
+			if (delta == 0) {
+				done = 1;	/* let's lock on to this... */
+				bound = rt;
+			}
+
+			if (!done) {
+				if (i > 0) {
+					adjust_latency += -delta;
+					adj = -delta + adjust_latency/4;
+				} else
+					adj = -delta;
+
+				tick_ops->add_tick(adj, current_tick_offset);
+			}
+#if DEBUG_TICK_SYNC
+			t[i].rt = rt;
+			t[i].master = master_time_stamp;
+			t[i].diff = delta;
+			t[i].lat = adjust_latency/4;
+#endif
+		}
+	}
+	local_irq_restore(flags);
+
+#if DEBUG_TICK_SYNC
+	for (i = 0; i < NUM_ROUNDS; i++)
+		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
+		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
+#endif
+
+	/* Note: adjacent string literals previously lacked a separating
+	 * space, printing "cycles,maxerr".
+	 */
+	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
+	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+}
+
+static void smp_start_sync_tick_client(int cpu);
+
+/* Master side of tick synchronization for one slave cpu: start the slave,
+ * wait for its ready handshake, then serve NUM_ROUNDS*NUM_ITERS tick
+ * timestamps through go[SLAVE].  itc_sync_lock serializes masters.
+ */
+static void smp_synchronize_one_tick(int cpu)
+{
+	unsigned long flags, i;
+
+	go[MASTER] = 0;
+
+	smp_start_sync_tick_client(cpu);
+
+	/* wait for client to be ready */
+	while (!go[MASTER])
+		membar("#LoadLoad");
+
+	/* now let the client proceed into his loop */
+	go[MASTER] = 0;
+	membar("#StoreLoad");
+
+	spin_lock_irqsave(&itc_sync_lock, flags);
+	{
+		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
+			/* Wait for the slave's ping, answer with our tick. */
+			while (!go[MASTER])
+				membar("#LoadLoad");
+			go[MASTER] = 0;
+			membar("#StoreStore");
+			go[SLAVE] = tick_ops->get_tick();
+			membar("#StoreLoad");
+		}
+	}
+	spin_unlock_irqrestore(&itc_sync_lock, flags);
+}
+
+extern unsigned long sparc64_cpu_startup;
+
+/* The OBP cpu startup callback truncates the 3rd arg cookie to
+ * 32-bits (I think) so to be safe we have it read the pointer
+ * contained here so we work on >4GB machines. -DaveM
+ */
+static struct thread_info *cpu_new_thread = NULL;
+
+/* Boot one secondary cpu: fork an idle thread for it, fire it up through
+ * the PROM at sparc64_cpu_startup, and poll callin_flag (set by
+ * smp_callin()) for up to ~500ms.  Returns 0 on success, -ENODEV if the
+ * cpu never checked in.
+ * NOTE(review): the fork_idle() result is used unchecked — presumably
+ * assumed to always succeed this early in boot; verify.
+ */
+static int __devinit smp_boot_one_cpu(unsigned int cpu)
+{
+	unsigned long entry =
+		(unsigned long)(&sparc64_cpu_startup);
+	unsigned long cookie =
+		(unsigned long)(&cpu_new_thread);
+	struct task_struct *p;
+	int timeout, ret, cpu_node;
+
+	p = fork_idle(cpu);
+	callin_flag = 0;
+	/* The new cpu picks its thread_info up through the cookie. */
+	cpu_new_thread = p->thread_info;
+	cpu_set(cpu, cpu_callout_map);
+
+	cpu_find_by_mid(cpu, &cpu_node);
+	prom_startcpu(cpu_node, entry, cookie);
+
+	for (timeout = 0; timeout < 5000000; timeout++) {
+		if (callin_flag)
+			break;
+		udelay(100);
+	}
+	if (callin_flag) {
+		ret = 0;
+	} else {
+		printk("Processor %d is stuck.\n", cpu);
+		cpu_clear(cpu, cpu_callout_map);
+		ret = -ENODEV;
+	}
+	cpu_new_thread = NULL;
+
+	return ret;
+}
+
+/* Deliver one 3-word cross-call mondo to a single cpu on Spitfire.
+ * Interrupts are disabled around the UDB dispatch; on a NACK the send is
+ * retried after a short delay, and a persistently busy dispatch is
+ * reported and abandoned.
+ */
+static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
+{
+	u64 result, target;
+	int stuck, tmp;
+
+	if (this_is_starfire) {
+		/* map to real upaid */
+		cpu = (((cpu & 0x3c) << 1) |
+			((cpu & 0x40) >> 4) |
+			(cpu & 0x3));
+	}
+
+	target = (cpu << 14) | 0x70;
+again:
+	/* Ok, this is the real Spitfire Errata #54.
+	 * One must read back from a UDB internal register
+	 * after writes to the UDB interrupt dispatch, but
+	 * before the membar Sync for that write.
+	 * So we use the high UDB control register (ASI 0x7f,
+	 * ADDR 0x20) for the dummy read. -DaveM
+	 */
+	tmp = 0x40;
+	__asm__ __volatile__(
+	"wrpr	%1, %2, %%pstate\n\t"
+	"stxa	%4, [%0] %3\n\t"
+	"stxa	%5, [%0+%8] %3\n\t"
+	"add	%0, %8, %0\n\t"
+	"stxa	%6, [%0+%8] %3\n\t"
+	"membar	#Sync\n\t"
+	"stxa	%%g0, [%7] %3\n\t"
+	"membar	#Sync\n\t"
+	"mov	0x20, %%g1\n\t"
+	"ldxa	[%%g1] 0x7f, %%g0\n\t"
+	"membar	#Sync"
+	: "=r" (tmp)
+	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
+	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
+	  "r" (0x10), "0" (tmp)
+        : "g1");
+
+	/* NOTE: PSTATE_IE is still clear. */
+	/* Poll the dispatch status until the busy bit clears. */
+	stuck = 100000;
+	do {
+		__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			: "=r" (result)
+			: "i" (ASI_INTR_DISPATCH_STAT));
+		if (result == 0) {
+			/* Delivered cleanly: re-enable interrupts, done. */
+			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+					     : : "r" (pstate));
+			return;
+		}
+		stuck -= 1;
+		if (stuck == 0)
+			break;
+	} while (result & 0x1);
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : : "r" (pstate));
+	if (stuck == 0) {
+		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+		       smp_processor_id(), result);
+	} else {
+		/* NACKed: back off briefly and resend. */
+		udelay(2);
+		goto again;
+	}
+}
+
+/* Deliver a cross-call to every cpu in 'mask', one dispatch at a time
+ * (Spitfire has no multi-target dispatch).
+ */
+static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	u64 pstate;
+	int i;
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+	for_each_cpu_mask(i, mask)
+		spitfire_xcall_helper(data0, data1, data2, pstate, i);
+}
+
+/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
+ * packet, but we have no use for that.  However we do take advantage of
+ * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
+ *
+ * Writes the 3-word mondo once, dispatches it to every cpu in 'mask',
+ * then polls the dispatch status.  Cpus that NACKed are retried; a
+ * dispatch stuck busy is reported and abandoned.
+ */
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	u64 pstate, ver;
+	int nack_busy_id, is_jalapeno;
+
+	if (cpus_empty(mask))
+		return;
+
+	/* Unfortunately, someone at Sun had the brilliant idea to make the
+	 * busy/nack fields hard-coded by ITID number for this Ultra-III
+	 * derivative processor.
+	 */
+	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+	is_jalapeno = ((ver >> 32) == 0x003e0016);
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+
+retry:
+	/* Disable interrupts for the dispatch sequence. */
+	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
+			     : : "r" (pstate), "i" (PSTATE_IE));
+
+	/* Setup the dispatch data registers. */
+	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
+			     "stxa	%1, [%4] %6\n\t"
+			     "stxa	%2, [%5] %6\n\t"
+			     "membar	#Sync\n\t"
+			     : /* no outputs */
+			     : "r" (data0), "r" (data1), "r" (data2),
+			       "r" (0x40), "r" (0x50), "r" (0x60),
+			       "i" (ASI_INTR_W));
+
+	nack_busy_id = 0;
+	{
+		int i;
+
+		/* Fire one dispatch per target cpu; each gets its own
+		 * busy/nack slot (except on Jalapeno, where slots are
+		 * fixed by ITID).
+		 */
+		for_each_cpu_mask(i, mask) {
+			u64 target = (i << 14) | 0x70;
+
+			if (!is_jalapeno)
+				target |= (nack_busy_id << 24);
+			__asm__ __volatile__(
+				"stxa	%%g0, [%0] %1\n\t"
+				"membar	#Sync\n\t"
+				: /* no outputs */
+				: "r" (target), "i" (ASI_INTR_W));
+			nack_busy_id++;
+		}
+	}
+
+	/* Now, poll for completion. */
+	{
+		u64 dispatch_stat;
+		long stuck;
+
+		stuck = 100000 * nack_busy_id;
+		do {
+			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
+					     : "=r" (dispatch_stat)
+					     : "i" (ASI_INTR_DISPATCH_STAT));
+			if (dispatch_stat == 0UL) {
+				/* All delivered: restore interrupts, done. */
+				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+						     : : "r" (pstate));
+				return;
+			}
+			if (!--stuck)
+				break;
+		} while (dispatch_stat & 0x5555555555555555UL);
+
+		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+				     : : "r" (pstate));
+
+		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+			/* Busy bits will not clear, continue instead
+			 * of freezing up on this cpu.
+			 */
+			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+			       smp_processor_id(), dispatch_stat);
+		} else {
+			int i, this_busy_nack = 0;
+
+			/* Delay some random time with interrupts enabled
+			 * to prevent deadlock.
+			 */
+			udelay(2 * nack_busy_id);
+
+			/* Clear out the mask bits for cpus which did not
+			 * NACK us.
+			 */
+			for_each_cpu_mask(i, mask) {
+				u64 check_mask;
+
+				if (is_jalapeno)
+					check_mask = (0x2UL << (2*i));
+				else
+					check_mask = (0x2UL <<
+						      this_busy_nack);
+				if ((dispatch_stat & check_mask) == 0)
+					cpu_clear(i, mask);
+				this_busy_nack += 2;
+			}
+
+			goto retry;
+		}
+	}
+}
+
+/* Send cross call to every online cpu in MASK except self.
+ * NOTE: The caller is expected to run the local copy on the
+ * master cpu itself.
+ */
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
+{
+	int this_cpu = get_cpu();
+	u64 data0 = (((u64)ctx) << 32) | (((u64)func) & 0xffffffff);
+
+	/* Never cross call offline cpus or ourselves. */
+	cpus_and(mask, mask, cpu_online_map);
+	cpu_clear(this_cpu, mask);
+
+	if (tlb_type != spitfire)
+		cheetah_xcall_deliver(data0, data1, data2, mask);
+	else
+		spitfire_xcall_deliver(data0, data1, data2, mask);
+
+	put_cpu();
+}
+
+extern unsigned long xcall_sync_tick;
+
+/* Ask CPU to run the tick synchronization handler. */
+static void smp_start_sync_tick_client(int cpu)
+{
+	cpumask_t target = cpumask_of_cpu(cpu);
+
+	smp_cross_call_masked(&xcall_sync_tick, 0, 0, 0, target);
+}
+
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
+
+/* Bookkeeping shared between smp_call_function() (the initiator)
+ * and smp_call_function_client() (the cross-call handler).
+ */
+struct call_data_struct {
+	void (*func) (void *info);	/* function each target cpu runs */
+	void *info;			/* opaque argument handed to func */
+	atomic_t finished;		/* count of cpus that have acked */
+	int wait;			/* nonzero: targets run func before acking */
+};
+
+static DEFINE_SPINLOCK(call_lock);
+static struct call_data_struct *call_data;
+
+extern unsigned long xcall_call_function;
+
+/*
+ * Run func(info) on every other online cpu.
+ *
+ * If wait is set, block until every target has finished running
+ * func; otherwise only until every target has snapped the call
+ * data (see smp_call_function_client()).  nonatomic is accepted
+ * but unused here.
+ *
+ * Always returns 0, even on the timeout path below (the failure
+ * is only reported via printk).
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *info), void *info,
+		      int nonatomic, int wait)
+{
+	struct call_data_struct data;
+	int cpus = num_online_cpus() - 1;
+	long timeout;
+
+	/* Nothing to do on an effectively uniprocessor system. */
+	if (!cpus)
+		return 0;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.finished, 0);
+	data.wait = wait;
+
+	/* call_lock serializes initiators so that the global
+	 * call_data pointer refers to one in-flight request at
+	 * a time.
+	 */
+	spin_lock(&call_lock);
+
+	call_data = &data;
+
+	smp_cross_call(&xcall_call_function, 0, 0, 0);
+
+	/* 
+	 * Wait for other cpus to complete function or at
+	 * least snap the call data.
+	 */
+	timeout = 1000000;
+	while (atomic_read(&data.finished) != cpus) {
+		if (--timeout <= 0)
+			goto out_timeout;
+		barrier();
+		udelay(1);
+	}
+
+	spin_unlock(&call_lock);
+
+	return 0;
+
+out_timeout:
+	spin_unlock(&call_lock);
+	/* NOTE(review): after a timeout, late responders may still
+	 * dereference call_data, which points at this function's
+	 * stack frame, and 0 is returned as if the call succeeded.
+	 * Worth confirming whether callers can tolerate that.
+	 */
+	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
+	       (long) num_online_cpus() - 1L,
+	       (long) atomic_read(&data.finished));
+	return 0;
+}
+
+/* Cross-call handler run on each target cpu of smp_call_function().
+ * Snaps the shared call_data, clears the softint, runs the function
+ * and acks the initiator in the order its wait mode requires.
+ */
+void smp_call_function_client(int irq, struct pt_regs *regs)
+{
+	void (*fn) (void *) = call_data->func;
+	void *arg = call_data->info;
+
+	clear_softint(1 << irq);
+	if (!call_data->wait) {
+		/* Initiator only waits for the data snap: ack first,
+		 * then do the actual work.
+		 */
+		atomic_inc(&call_data->finished);
+		fn(arg);
+	} else {
+		/* Initiator waits for completion: finish the work
+		 * before acking.
+		 */
+		fn(arg);
+		atomic_inc(&call_data->finished);
+	}
+}
+
+extern unsigned long xcall_flush_tlb_mm;
+extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_kernel_range;
+extern unsigned long xcall_flush_tlb_all_spitfire;
+extern unsigned long xcall_flush_tlb_all_cheetah;
+extern unsigned long xcall_report_regs;
+extern unsigned long xcall_receive_signal;
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+extern unsigned long xcall_flush_dcache_page_cheetah;
+#endif
+extern unsigned long xcall_flush_dcache_page_spitfire;
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+extern atomic_t dcpage_flushes;
+extern atomic_t dcpage_flushes_xcall;
+#endif
+
+/* Flush this cpu's cached copy of PAGE.
+ *
+ * With D-cache aliasing possible the D-cache flush is always done,
+ * and spitfire additionally gets its I-cache flushed when the page
+ * has a mapping.  Without aliasing only the spitfire I-cache case
+ * needs handling.
+ */
+static __inline__ void __local_flush_dcache_page(struct page *page)
+{
+#ifdef DCACHE_ALIASING_POSSIBLE
+	__flush_dcache_page(page_address(page),
+			    ((tlb_type == spitfire) &&
+			     page_mapping(page) != NULL));
+#else
+	if (page_mapping(page) != NULL &&
+	    tlb_type == spitfire)
+		__flush_icache_page(__pa(page_address(page)));
+#endif
+}
+
+/* Flush the D-cache copy of PAGE held by CPU.
+ *
+ * If CPU is the current cpu the flush is done locally, otherwise a
+ * single targeted cross call is sent using the spitfire or cheetah
+ * delivery routine as appropriate.  For spitfire, bit 32 of data0
+ * tells the handler whether the page has a mapping.
+ */
+void smp_flush_dcache_page_impl(struct page *page, int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
+	int this_cpu = get_cpu();
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+	if (cpu == this_cpu) {
+		__local_flush_dcache_page(page);
+	} else if (cpu_online(cpu)) {
+		void *pg_addr = page_address(page);
+		u64 data0;
+
+		if (tlb_type == spitfire) {
+			data0 =
+				((u64)&xcall_flush_dcache_page_spitfire);
+			/* Tell the handler whether an I-cache flush
+			 * is needed too (page has a mapping).
+			 */
+			if (page_mapping(page) != NULL)
+				data0 |= ((u64)1 << 32);
+			spitfire_xcall_deliver(data0,
+					       __pa(pg_addr),
+					       (u64) pg_addr,
+					       mask);
+		} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
+			data0 =
+				((u64)&xcall_flush_dcache_page_cheetah);
+			cheetah_xcall_deliver(data0,
+					      __pa(pg_addr),
+					      0, mask);
+#endif
+		}
+#ifdef CONFIG_DEBUG_DCFLUSH
+		atomic_inc(&dcpage_flushes_xcall);
+#endif
+	}
+
+	put_cpu();
+}
+
+/* Flush the D-cache copy of PAGE on every online cpu.
+ *
+ * A cross call is broadcast to all other cpus (skipped entirely if
+ * we are the only one online), and the local flush is always done
+ * last.  The mm argument is unused here.
+ */
+void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+{
+	void *pg_addr = page_address(page);
+	cpumask_t mask = cpu_online_map;
+	u64 data0;
+	int this_cpu = get_cpu();
+
+	cpu_clear(this_cpu, mask);
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+	if (cpus_empty(mask))
+		goto flush_self;
+	if (tlb_type == spitfire) {
+		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
+		/* Bit 32 tells the handler the page has a mapping. */
+		if (page_mapping(page) != NULL)
+			data0 |= ((u64)1 << 32);
+		spitfire_xcall_deliver(data0,
+				       __pa(pg_addr),
+				       (u64) pg_addr,
+				       mask);
+	} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
+		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
+		cheetah_xcall_deliver(data0,
+				      __pa(pg_addr),
+				      0, mask);
+#endif
+	}
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes_xcall);
+#endif
+ flush_self:
+	__local_flush_dcache_page(page);
+
+	put_cpu();
+}
+
+/* Poke CPU with the signal-delivery cross call; a no-op for
+ * offline cpus.
+ */
+void smp_receive_signal(int cpu)
+{
+	cpumask_t target = cpumask_of_cpu(cpu);
+	u64 mondo;
+
+	if (!cpu_online(cpu))
+		return;
+
+	mondo = (((u64)&xcall_receive_signal) & 0xffffffff);
+	if (tlb_type == spitfire)
+		spitfire_xcall_deliver(mondo, 0, 0, target);
+	else
+		cheetah_xcall_deliver(mondo, 0, 0, target);
+}
+
+/* Handler for the xcall_receive_signal cross call. */
+void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	/* Just return, rtrap takes care of the rest. */
+	clear_softint(1 << irq);
+}
+
+/* Ask every other cpu to dump its registers (debugging aid). */
+void smp_report_regs(void)
+{
+	smp_cross_call(&xcall_report_regs, 0, 0, 0);
+}
+
+/* Flush the entire TLB on every cpu, remote cpus first via the
+ * cpu-type specific cross call, then locally.
+ */
+void smp_flush_tlb_all(void)
+{
+	if (tlb_type == spitfire)
+		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
+	else
+		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
+	__flush_tlb_all();
+}
+
+/* We know that the window frames of the user have been flushed
+ * to the stack before we get here because all callers of us
+ * are flush_tlb_*() routines, and these run after flush_cache_*()
+ * which performs the flushw.
+ *
+ * The SMP TLB coherency scheme we use works as follows:
+ *
+ * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
+ *    space has (potentially) executed on, this is the heuristic
+ *    we use to avoid doing cross calls.
+ *
+ *    Also, for flushing from kswapd and also for clones, we
+ *    use cpu_vm_mask as the list of cpus to make run the TLB.
+ *
+ * 2) TLB context numbers are shared globally across all processors
+ *    in the system, this allows us to play several games to avoid
+ *    cross calls.
+ *
+ *    One invariant is that when a cpu switches to a process, and
+ *    that process's tsk->active_mm->cpu_vm_mask does not have the
+ *    current cpu's bit set, that tlb context is flushed locally.
+ *
+ *    If the address space is non-shared (ie. mm->count == 1) we avoid
+ *    cross calls when we want to flush the currently running process's
+ *    tlb state.  This is done by clearing all cpu bits except the current
+ *    processor's in current->active_mm->cpu_vm_mask and performing the
+ *    flush locally only.  This will force any subsequent cpus which run
+ *    this task to flush the context from the local tlb if the process
+ *    migrates to another cpu (again).
+ *
+ * 3) For shared address spaces (threads) and swapping we bite the
+ *    bullet for most cases and perform the cross call (but only to
+ *    the cpus listed in cpu_vm_mask).
+ *
+ *    The performance gain from "optimizing" away the cross call for threads is
+ *    questionable (in theory the big win for threads is the massive sharing of
+ *    address space state across processors).
+ */
+/* Flush MM's TLB context on every cpu that may hold it (see the
+ * coherency scheme described in the comment above).
+ */
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+        /*
+         * This code is called from two places, dup_mmap and exit_mmap. In the
+         * former case, we really need a flush. In the later case, the callers
+         * are single threaded exec_mmap (really need a flush), multithreaded
+         * exec_mmap case (do not need to flush, since the caller gets a new
+         * context via activate_mm), and all other callers of mmput() whence
+         * the flush can be optimized since the associated threads are dead and
+         * the mm is being torn down (__exit_mm and other mmput callers) or the
+         * owning thread is dissociating itself from the mm. The
+         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
+         * for single thread exec and dup_mmap cases. An alternate check might
+         * have been (current->mm != mm).
+         *                                              Kanoj Sarcar
+         */
+        if (atomic_read(&mm->mm_users) == 0)
+                return;
+
+	{
+		u32 ctx = CTX_HWBITS(mm->context);
+		int cpu = get_cpu();
+
+		/* Single-user mm: shrink cpu_vm_mask to just us and
+		 * flush locally only.  Other cpus flush the context
+		 * lazily if the task later migrates there.
+		 */
+		if (atomic_read(&mm->mm_users) == 1) {
+			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+			goto local_flush_and_out;
+		}
+
+		smp_cross_call_masked(&xcall_flush_tlb_mm,
+				      ctx, 0, 0,
+				      mm->cpu_vm_mask);
+
+	local_flush_and_out:
+		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+		put_cpu();
+	}
+}
+
+/* Flush NR pending virtual addresses (VADDRS) from MM's TLB
+ * context on every cpu in its cpu_vm_mask, then locally.
+ */
+void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+{
+	u32 ctx = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	/* Current, single-user mm: flush locally only and shrink
+	 * cpu_vm_mask to just this cpu (see smp_flush_tlb_mm).
+	 */
+	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		goto local_flush_and_out;
+	} else {
+		/* This optimization is not valid.  Normally
+		 * we will be holding the page_table_lock, but
+		 * there is an exception which is copy_page_range()
+		 * when forking.  The lock is held during the individual
+		 * page table updates in the parent, but not at the
+		 * top level, which is where we are invoked.
+		 */
+		if (0) {
+			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+			/* By virtue of running under the mm->page_table_lock,
+			 * and mmu_context.h:switch_mm doing the same, the
+			 * following operation is safe.
+			 */
+			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+				goto local_flush_and_out;
+		}
+	}
+
+	smp_cross_call_masked(&xcall_flush_tlb_pending,
+			      ctx, nr, (unsigned long) vaddrs,
+			      mm->cpu_vm_mask);
+
+local_flush_and_out:
+	__flush_tlb_pending(ctx, nr, vaddrs);
+
+	put_cpu();
+}
+
+/* Flush the kernel TLB entries covering [start, end) on every cpu.
+ * The range is page-aligned first; an empty range is a no-op.
+ */
+void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	start &= PAGE_MASK;
+	end = PAGE_ALIGN(end);
+
+	if (start == end)
+		return;
+
+	smp_cross_call(&xcall_flush_tlb_kernel_range, 0, start, end);
+	__flush_tlb_kernel_range(start, end);
+}
+
+/* CPU capture. */
+/* #define CAPTURE_DEBUG */
+extern unsigned long xcall_capture;
+
+static atomic_t smp_capture_depth = ATOMIC_INIT(0);
+static atomic_t smp_capture_registry = ATOMIC_INIT(0);
+static unsigned long penguins_are_doing_time;
+
+/* Capture all other cpus: the first (depth 1) caller raises
+ * penguins_are_doing_time, cross calls xcall_capture, and spins
+ * until every online cpu has checked into smp_capture_registry.
+ * Nested calls only bump the depth count.
+ */
+void smp_capture(void)
+{
+	int result = atomic_add_ret(1, &smp_capture_depth);
+
+	if (result == 1) {
+		int ncpus = num_online_cpus();
+
+#ifdef CAPTURE_DEBUG
+		printk("CPU[%d]: Sending penguins to jail...",
+		       smp_processor_id());
+#endif
+		penguins_are_doing_time = 1;
+		/* Make the flag visible before the registry update
+		 * and the cross call.
+		 */
+		membar("#StoreStore | #LoadStore");
+		atomic_inc(&smp_capture_registry);
+		smp_cross_call(&xcall_capture, 0, 0, 0);
+		while (atomic_read(&smp_capture_registry) != ncpus)
+			membar("#LoadLoad");
+#ifdef CAPTURE_DEBUG
+		printk("done\n");
+#endif
+	}
+}
+
+/* Undo one level of smp_capture(); when the depth drops to zero,
+ * clear the flag so captured cpus leave smp_penguin_jailcell().
+ */
+void smp_release(void)
+{
+	if (atomic_dec_and_test(&smp_capture_depth)) {
+#ifdef CAPTURE_DEBUG
+		printk("CPU[%d]: Giving pardon to "
+		       "imprisoned penguins\n",
+		       smp_processor_id());
+#endif
+		penguins_are_doing_time = 0;
+		/* Publish the cleared flag before leaving the registry. */
+		membar("#StoreStore | #StoreLoad");
+		atomic_dec(&smp_capture_registry);
+	}
+}
+
+/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
+ * can service tlb flush xcalls...
+ */
+extern void prom_world(int);
+extern void save_alternate_globals(unsigned long *);
+extern void restore_alternate_globals(unsigned long *);
+/* Cross-call handler for xcall_capture: check in to the capture
+ * registry, switch to the PROM's view of the alternate globals,
+ * and spin until smp_release() drops penguins_are_doing_time.
+ */
+void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+{
+	unsigned long global_save[24];
+
+	clear_softint(1 << irq);
+
+	preempt_disable();
+
+	/* Spill register windows before handing control to the PROM. */
+	__asm__ __volatile__("flushw");
+	save_alternate_globals(global_save);
+	prom_world(1);
+	atomic_inc(&smp_capture_registry);
+	membar("#StoreLoad | #StoreStore");
+	while (penguins_are_doing_time)
+		membar("#LoadLoad");
+	restore_alternate_globals(global_save);
+	atomic_dec(&smp_capture_registry);
+	prom_world(0);
+
+	preempt_enable();
+}
+
+extern unsigned long xcall_promstop;
+
+/* Tell all other cpus to stop in the PROM. */
+void smp_promstop_others(void)
+{
+	smp_cross_call(&xcall_promstop, 0, 0, 0);
+}
+
+#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
+#define prof_counter(__cpu)		cpu_data(__cpu).counter
+
+/* Per-cpu timer tick handler.
+ *
+ * If the level-14 tick softint is not pending this was a normal
+ * level-14 device interrupt and is handed to handler_irq().
+ * Otherwise profile ticks are accounted, and the tick compare
+ * register is advanced (with PSTATE_IE cleared around the
+ * read-modify sequence) until it lands in the future.
+ */
+void smp_percpu_timer_interrupt(struct pt_regs *regs)
+{
+	unsigned long compare, tick, pstate;
+	int cpu = smp_processor_id();
+	int user = user_mode(regs);
+
+	/*
+	 * Check for level 14 softint.
+	 */
+	{
+		unsigned long tick_mask = tick_ops->softint_mask;
+
+		if (!(get_softint() & tick_mask)) {
+			extern void handler_irq(int, struct pt_regs *);
+
+			handler_irq(14, regs);
+			return;
+		}
+		clear_softint(tick_mask);
+	}
+
+	do {
+		profile_tick(CPU_PROFILING, regs);
+		if (!--prof_counter(cpu)) {
+			irq_enter();
+
+			/* Only the boot cpu drives the global timer. */
+			if (cpu == boot_cpu_id) {
+				kstat_this_cpu.irqs[0]++;
+				timer_tick_interrupt(regs);
+			}
+
+			update_process_times(user);
+
+			irq_exit();
+
+			prof_counter(cpu) = prof_multiplier(cpu);
+		}
+
+		/* Guarantee that the following sequences execute
+		 * uninterrupted.
+		 */
+		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
+				     "wrpr	%0, %1, %%pstate"
+				     : "=r" (pstate)
+				     : "i" (PSTATE_IE));
+
+		compare = tick_ops->add_compare(current_tick_offset);
+		tick = tick_ops->get_tick();
+
+		/* Restore PSTATE_IE. */
+		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
+				     : /* no outputs */
+				     : "r" (pstate));
+	} while (time_after_eq(tick, compare));
+}
+
+/* Arm this cpu's tick timer: reset the profiling counters and
+ * initialize the tick compare register with interrupts disabled
+ * around the hardware sequence.
+ */
+static void __init smp_setup_percpu_timer(void)
+{
+	int cpu = smp_processor_id();
+	unsigned long pstate;
+
+	prof_counter(cpu) = prof_multiplier(cpu) = 1;
+
+	/* Guarantee that the following sequences execute
+	 * uninterrupted.
+	 */
+	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick(current_tick_offset);
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+}
+
+/* Early boot: record the boot cpu, seed the tick offset, mark the
+ * boot cpu online and reset its profiling counters.
+ */
+void __init smp_tick_init(void)
+{
+	boot_cpu_id = hard_smp_processor_id();
+	current_tick_offset = timer_tick_offset;
+
+	cpu_set(boot_cpu_id, cpu_online_map);
+	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
+}
+
+/* /proc/profile writes can call this, don't __init it please. */
+static DEFINE_SPINLOCK(prof_setup_lock);
+
+/* Set the profiling tick multiplier for every cpu.  Returns 0 on
+ * success, -EINVAL for a zero multiplier or one that would make
+ * the resulting tick offset smaller than 1000.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	unsigned long flags;
+	int cpu;
+
+	if (!multiplier || (timer_tick_offset / multiplier) < 1000)
+		return -EINVAL;
+
+	spin_lock_irqsave(&prof_setup_lock, flags);
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		prof_multiplier(cpu) = multiplier;
+	current_tick_offset = timer_tick_offset / multiplier;
+	spin_unlock_irqrestore(&prof_setup_lock, flags);
+
+	return 0;
+}
+
+/* Build phys_cpu_present_map from the PROM cpu list, then trim it
+ * down to at most max_cpus entries (never removing the boot cpu),
+ * and record the boot cpu's info.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int instance, mid;
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, NULL, &mid)) {
+		if (mid < max_cpus)
+			cpu_set(mid, phys_cpu_present_map);
+		instance++;
+	}
+
+	if (num_possible_cpus() > max_cpus) {
+		instance = 0;
+		while (!cpu_find_by_instance(instance, NULL, &mid)) {
+			/* The boot cpu must stay present. */
+			if (mid != boot_cpu_id) {
+				cpu_clear(mid, phys_cpu_present_map);
+				if (num_possible_cpus() <= max_cpus)
+					break;
+			}
+			instance++;
+		}
+	}
+
+	smp_store_cpu_info(boot_cpu_id);
+}
+
+/* Sanity-check the boot cpu id, record it in the thread info, and
+ * mark the boot cpu online and present.
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+	if (hard_smp_processor_id() >= NR_CPUS) {
+		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+		prom_halt();
+	}
+
+	current_thread_info()->cpu = hard_smp_processor_id();
+
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), phys_cpu_present_map);
+}
+
+/* Bring CPU online: boot it, let it commence, spin until it shows
+ * up in cpu_online_map, then synchronize its tick counter.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+	int ret = smp_boot_one_cpu(cpu);
+
+	if (!ret) {
+		cpu_set(cpu, smp_commenced_mask);
+		while (!cpu_isset(cpu, cpu_online_map))
+			mb();
+		/* NOTE(review): the loop above only exits once the cpu
+		 * is online, so this -ENODEV branch looks unreachable
+		 * as written -- confirm intent.
+		 */
+		if (!cpu_isset(cpu, cpu_online_map)) {
+			ret = -ENODEV;
+		} else {
+			smp_synchronize_one_tick(cpu);
+		}
+	}
+	return ret;
+}
+
+/* Final SMP bringup step: report cpu count and combined BogoMIPS. */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	unsigned long bogosum = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		if (cpu_online(cpu))
+			bogosum += cpu_data(cpu).udelay_val;
+
+	printk("Total of %ld processors activated "
+	       "(%lu.%02lu BogoMIPS).\n",
+	       (long) num_online_cpus(),
+	       bogosum/(500000/HZ),
+	       (bogosum/(5000/HZ))%100);
+}
+
+/* This needn't do anything as we do not sleep the cpu
+ * inside of the idler task, so an interrupt is not needed
+ * to get a clean fast response.
+ *
+ * XXX Reverify this assumption... -DaveM
+ *
+ * Addendum: We do want it to do something for the signal
+ *           delivery case, we detect that by just seeing
+ *           if we are trying to send this to an idler or not.
+ */
+void smp_send_reschedule(int cpu)
+{
+	/* Only interrupt the target if it is actually idling;
+	 * see the rationale in the comment above.
+	 */
+	if (cpu_data(cpu).idle_volume == 0)
+		smp_receive_signal(cpu);
+}
+
+/* This is a nop because we capture all other cpus
+ * anyways when making the PROM active.
+ */
+void smp_send_stop(void)
+{
+}
+
+unsigned long __per_cpu_base;
+unsigned long __per_cpu_shift;
+
+EXPORT_SYMBOL(__per_cpu_base);
+EXPORT_SYMBOL(__per_cpu_shift);
+
+/* Allocate and populate the per-cpu data areas.
+ *
+ * The per-cpu section size is rounded up to a power of two (at
+ * least PERCPU_ENOUGH_ROOM when modules are enabled) so each
+ * cpu's copy sits at (__per_cpu_base + (cpu << __per_cpu_shift)).
+ */
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long goal, size, i;
+	char *ptr;
+	/* Created by linker magic */
+	extern char __per_cpu_start[], __per_cpu_end[];
+
+	/* Copy section for each CPU (we discard the original) */
+	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
+
+#ifdef CONFIG_MODULES
+	if (goal < PERCPU_ENOUGH_ROOM)
+		goal = PERCPU_ENOUGH_ROOM;
+#endif
+	/* size = smallest power of two >= goal; shift = log2(size). */
+	__per_cpu_shift = 0;
+	for (size = 1UL; size < goal; size <<= 1UL)
+		__per_cpu_shift++;
+
+	/* Make sure the resulting __per_cpu_base value
+	 * will fit in the 43-bit sign extended IMMU
+	 * TSB register.
+	 */
+	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
+			      (unsigned long) __per_cpu_start);
+
+	__per_cpu_base = ptr - __per_cpu_start;
+
+	if ((__per_cpu_shift < PAGE_SHIFT) ||
+	    (__per_cpu_base & ~PAGE_MASK) ||
+	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
+		prom_printf("PER_CPU: Invalid layout, "
+			    "ptr[%p] shift[%lx] base[%lx]\n",
+			    ptr, __per_cpu_shift, __per_cpu_base);
+		prom_halt();
+	}
+
+	for (i = 0; i < NR_CPUS; i++, ptr += size)
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+	/* Finally, load in the boot cpu's base value.
+	 * We abuse the IMMU TSB register for trap handler
+	 * entry and exit loading of %g5.  That is why it
+	 * has to be page aligned.
+	 */
+	cpu_setup_percpu_base(hard_smp_processor_id());
+}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
new file mode 100644
index 0000000..cad5a11
--- /dev/null
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -0,0 +1,432 @@
+/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+/* Tell string.h we don't want memcpy etc. as cpp defines */
+#define EXPORT_SYMTAB_STROPS
+#define PROMLIB_INTERNAL
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/fs_struct.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <net/compat.h>
+
+#include <asm/oplib.h>
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/idprom.h>
+#include <asm/svr4.h>
+#include <asm/elf.h>
+#include <asm/head.h>
+#include <asm/smp.h>
+#include <asm/mostek.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/fpumacro.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_SBUS
+#include <asm/sbus.h>
+#include <asm/dma.h>
+#endif
+#ifdef CONFIG_PCI
+#include <asm/ebus.h>
+#include <asm/isa.h>
+#endif
+#include <asm/a.out.h>
+#include <asm/ns87303.h>
+#include <asm/timer.h>
+#include <asm/cpudata.h>
+#include <asm/rwsem.h>
+
+/* Local mirror of the userspace pollfd layout. */
+struct poll {
+	int fd;			/* file descriptor to poll */
+	short events;		/* requested events */
+	short revents;		/* returned events */
+};
+
+extern void die_if_kernel(char *str, struct pt_regs *regs);
+extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+void _sigpause_common (unsigned int set, struct pt_regs *);
+extern void *__bzero(void *, size_t);
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern int __memcmp(const void *, const void *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern void linux_sparc_syscall(void);
+extern void rtrap(void);
+extern void show_regs(struct pt_regs *);
+extern void solaris_syscall(void);
+extern void syscall_trace(void);
+extern u32 sunos_sys_table[], sys_call_table32[];
+extern void tl0_solaris(void);
+extern void sys_sigsuspend(void);
+extern int svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
+extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
+extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
+extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
+extern long sparc32_open(const char __user * filename, int flags, int mode);
+extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
+	unsigned long offset, unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+	unsigned long pfn, unsigned long size, pgprot_t prot);
+extern void (*prom_palette)(int);
+
+extern int __ashrdi3(int, int);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
+extern void _do_spin_lock (spinlock_t *lock, char *str);
+extern void _do_spin_unlock (spinlock_t *lock);
+extern int _spin_trylock (spinlock_t *lock);
+extern void _do_read_lock(rwlock_t *rw, char *str);
+extern void _do_read_unlock(rwlock_t *rw, char *str);
+extern void _do_write_lock(rwlock_t *rw, char *str);
+extern void _do_write_unlock(rwlock_t *rw);
+extern int _do_write_trylock(rwlock_t *rw, char *str);
+#endif
+
+extern unsigned long phys_base;
+extern unsigned long pfn_base;
+
+extern unsigned int sys_call_table[];
+
+extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+		      unsigned long *);
+extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+		      unsigned long *, unsigned long *);
+extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+		      unsigned long *, unsigned long *, unsigned long *);
+
+/* Per-CPU information table */
+EXPORT_PER_CPU_SYMBOL(__cpu_data);
+
+/* used by various drivers */
+#ifdef CONFIG_SMP
+#ifndef CONFIG_DEBUG_SPINLOCK
+/* Out of line rw-locking implementation. */
+EXPORT_SYMBOL(__read_lock);
+EXPORT_SYMBOL(__read_unlock);
+EXPORT_SYMBOL(__write_lock);
+EXPORT_SYMBOL(__write_unlock);
+EXPORT_SYMBOL(__write_trylock);
+/* Out of line spin-locking implementation. */
+EXPORT_SYMBOL(_raw_spin_lock);
+EXPORT_SYMBOL(_raw_spin_lock_flags);
+#endif
+
+/* Hard IRQ locking */
+EXPORT_SYMBOL(synchronize_irq);
+
+#if defined(CONFIG_MCOUNT)
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/* CPU online map and active count.  */
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(phys_cpu_present_map);
+
+/* Spinlock debugging library, optional. */
+#ifdef CONFIG_DEBUG_SPINLOCK
+EXPORT_SYMBOL(_do_spin_lock);
+EXPORT_SYMBOL(_do_spin_unlock);
+EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_do_read_lock);
+EXPORT_SYMBOL(_do_read_unlock);
+EXPORT_SYMBOL(_do_write_lock);
+EXPORT_SYMBOL(_do_write_unlock);
+EXPORT_SYMBOL(_do_write_trylock);
+#endif
+
+EXPORT_SYMBOL(smp_call_function);
+#endif /* CONFIG_SMP */
+
+EXPORT_SYMBOL(sparc64_get_clock_tick);
+
+/* semaphores */
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(up);
+
+/* RW semaphores */
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+
+/* Atomic counter implementation. */
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
+EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_ret);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
+
+/* Atomic bit operations. */
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+
+/* Bit searching */
+EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_le_bit);
+
+EXPORT_SYMBOL(ivector_table);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+
+EXPORT_SYMBOL(__flushw_user);
+
+EXPORT_SYMBOL(tlb_type);
+EXPORT_SYMBOL(get_fb_unmapped_area);
+EXPORT_SYMBOL(flush_icache_range);
+
+EXPORT_SYMBOL(flush_dcache_page);
+#ifdef DCACHE_ALIASING_POSSIBLE
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
+
+EXPORT_SYMBOL(mostek_lock);
+EXPORT_SYMBOL(mstk48t02_regs);
+EXPORT_SYMBOL(request_fast_irq);
+#ifdef CONFIG_SUN_AUXIO
+EXPORT_SYMBOL(auxio_set_led);
+EXPORT_SYMBOL(auxio_set_lte);
+#endif
+#ifdef CONFIG_SBUS
+EXPORT_SYMBOL(sbus_root);
+EXPORT_SYMBOL(dma_chain);
+EXPORT_SYMBOL(sbus_set_sbus64);
+EXPORT_SYMBOL(sbus_alloc_consistent);
+EXPORT_SYMBOL(sbus_free_consistent);
+EXPORT_SYMBOL(sbus_map_single);
+EXPORT_SYMBOL(sbus_unmap_single);
+EXPORT_SYMBOL(sbus_map_sg);
+EXPORT_SYMBOL(sbus_unmap_sg);
+EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
+EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
+EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
+EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
+#endif
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(ebus_chain);
+EXPORT_SYMBOL(isa_chain);
+EXPORT_SYMBOL(pci_memspace_mask);
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
+EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
+EXPORT_SYMBOL(pci_dma_supported);
+#endif
+
+/* I/O device mmaping on Sparc64. */
+EXPORT_SYMBOL(io_remap_page_range);
+EXPORT_SYMBOL(io_remap_pfn_range);
+
+/* Solaris/SunOS binary compatibility */
+EXPORT_SYMBOL(_sigpause_common);
+EXPORT_SYMBOL(verify_compat_iovec);
+
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__pte_alloc_one_kernel);
+#ifndef CONFIG_SMP
+EXPORT_SYMBOL(pgt_quicklists);
+#endif
+EXPORT_SYMBOL(put_fs_struct);
+
+/* math-emu wants this */
+EXPORT_SYMBOL(die_if_kernel);
+
+/* Kernel thread creation. */
+EXPORT_SYMBOL(kernel_thread);
+
+/* prom symbols */
+EXPORT_SYMBOL(idprom);
+EXPORT_SYMBOL(prom_root_node);
+EXPORT_SYMBOL(prom_getchild);
+EXPORT_SYMBOL(prom_getsibling);
+EXPORT_SYMBOL(prom_searchsiblings);
+EXPORT_SYMBOL(prom_firstprop);
+EXPORT_SYMBOL(prom_nextprop);
+EXPORT_SYMBOL(prom_getproplen);
+EXPORT_SYMBOL(prom_getproperty);
+EXPORT_SYMBOL(prom_node_has_property);
+EXPORT_SYMBOL(prom_setprop);
+EXPORT_SYMBOL(saved_command_line);
+EXPORT_SYMBOL(prom_getname);
+EXPORT_SYMBOL(prom_finddevice);
+EXPORT_SYMBOL(prom_feval);
+EXPORT_SYMBOL(prom_getbool);
+EXPORT_SYMBOL(prom_getstring);
+EXPORT_SYMBOL(prom_getint);
+EXPORT_SYMBOL(prom_getintdefault);
+EXPORT_SYMBOL(__prom_getchild);
+EXPORT_SYMBOL(__prom_getsibling);
+
+/* sparc library symbols */
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+#ifdef CONFIG_SOLARIS_EMUL_MODULE
+EXPORT_SYMBOL(linux_sparc_syscall);
+EXPORT_SYMBOL(rtrap);
+EXPORT_SYMBOL(show_regs);
+EXPORT_SYMBOL(solaris_syscall);
+EXPORT_SYMBOL(syscall_trace);
+EXPORT_SYMBOL(sunos_sys_table);
+EXPORT_SYMBOL(sys_call_table32);
+EXPORT_SYMBOL(tl0_solaris);
+EXPORT_SYMBOL(sys_sigsuspend);
+EXPORT_SYMBOL(sys_getppid);
+EXPORT_SYMBOL(sys_getpid);
+EXPORT_SYMBOL(sys_geteuid);
+EXPORT_SYMBOL(sys_getuid);
+EXPORT_SYMBOL(sys_getegid);
+EXPORT_SYMBOL(sys_getgid);
+EXPORT_SYMBOL(svr4_getcontext);
+EXPORT_SYMBOL(svr4_setcontext);
+EXPORT_SYMBOL(compat_sys_ioctl);
+EXPORT_SYMBOL(sparc32_open);
+EXPORT_SYMBOL(sys_close);
+#endif
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(_clear_page);
+EXPORT_SYMBOL(clear_user_page);
+EXPORT_SYMBOL(copy_user_page);
+EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__memscan_zero);
+EXPORT_SYMBOL(__memscan_generic);
+EXPORT_SYMBOL(__memcmp);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(memchr);
+
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(ip_fast_csum);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_in_user);
+EXPORT_SYMBOL(copy_to_user_fixup);
+EXPORT_SYMBOL(copy_from_user_fixup);
+EXPORT_SYMBOL(copy_in_user_fixup);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__bzero_noasi);
+
+/* Various address conversion macros use this. */
+EXPORT_SYMBOL(phys_base);
+EXPORT_SYMBOL(pfn_base);
+EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+EXPORT_SYMBOL(page_to_pfn);
+EXPORT_SYMBOL(pfn_to_page);
+
+/* No version information on this, heavily used in inline asm,
+ * and will always be 'void __ret_efault(void)'.
+ */
+EXPORT_SYMBOL(__ret_efault);
+
+/* No version information on these, as gcc produces such symbols. */
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strncmp);
+
+/* Delay routines. */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__delay);
+
+void VISenter(void);
+/* RAID code needs this */
+EXPORT_SYMBOL(VISenter);
+
+/* for input/keybdev */
+EXPORT_SYMBOL(sun_do_break);
+EXPORT_SYMBOL(serial_console);
+EXPORT_SYMBOL(stop_a_enabled);
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+EXPORT_SYMBOL(do_BUG);
+#endif
+
+/* for ns8703 */
+EXPORT_SYMBOL(ns87303_lock);
+
+/* for solaris compat module */
+EXPORT_SYMBOL_GPL(sys_call_table);
+
+EXPORT_SYMBOL(tick_ops);
+
+EXPORT_SYMBOL(xor_vis_2);
+EXPORT_SYMBOL(xor_vis_3);
+EXPORT_SYMBOL(xor_vis_4);
+EXPORT_SYMBOL(xor_vis_5);
+
+EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
new file mode 100644
index 0000000..ae859d4
--- /dev/null
+++ b/arch/sparc64/kernel/starfire.c
@@ -0,0 +1,123 @@
+/* $Id: starfire.c,v 1.10 2001/04/14 21:13:45 davem Exp $
+ * starfire.c: Starfire/E10000 support.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/upa.h>
+#include <asm/starfire.h>
+
+/*
+ * A few places around the kernel check this to see if
+ * they need to call us to do things in a Starfire specific
+ * way.
+ */
+int this_is_starfire = 0;
+
+/* Set this_is_starfire if the PROM device tree contains an
+ * "/ssp-serial" node.  The check assumes prom_finddevice()
+ * returns 0 or -1 when the node is absent.
+ */
+void check_if_starfire(void)
+{
+	int ssnode = prom_finddevice("/ssp-serial");
+	if (ssnode != 0 && ssnode != -1)
+		this_is_starfire = 1;
+}
+
+/* Per-cpu Starfire setup hook; intentionally empty for now. */
+void starfire_cpu_setup(void)
+{
+	/* Currently, nothing to do.  */
+}
+
+/* Read this CPU's hardware id via a UPA register read.
+ * NOTE(review): 0x1fff40000d0 is assumed to be the per-cpu id
+ * register in the Starfire address map — confirm against E10000
+ * hardware documentation.
+ */
+int starfire_hard_smp_processor_id(void)
+{
+	return upa_readl(0x1fff40000d0UL);
+}
+
+/*
+ * Each Starfire board has 32 registers which perform translation
+ * and delivery of traditional interrupt packets into the extended
+ * Starfire hardware format.  Essentially UPAID's now have 2 more
+ * bits than in all previous Sun5 systems.
+ */
+struct starfire_irqinfo {
+	unsigned long imap_slots[32];	/* imap routed via each treg; 0 == free */
+	unsigned long tregs[32];	/* physical addresses of the 32 translation regs */
+	struct starfire_irqinfo *next;	/* link in the global sflist */
+	int upaid, hwmid;		/* board UPA id and derived hardware module id */
+};
+
+/* One starfire_irqinfo per board, linked through ->next. */
+static struct starfire_irqinfo *sflist = NULL;
+
+/* Beam me up Scott(McNeil)y... */
+/* Allocate and register the interrupt-translation info for the board
+ * with the given UPA id, and return it as an opaque cookie.  Halts
+ * via the PROM on allocation failure (called at boot; no reasonable
+ * recovery exists).
+ */
+void *starfire_hookup(int upaid)
+{
+	struct starfire_irqinfo *p;
+	unsigned long treg_base, hwmid, i;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		prom_printf("starfire_hookup: No memory, this is insane.\n");
+		prom_halt();
+	}
+	/* NOTE(review): the base address and the upaid->hwmid bit
+	 * shuffle below are assumed to match the Starfire address
+	 * map — confirm against hardware documentation.  The same
+	 * shuffle appears in starfire_translate().
+	 */
+	treg_base = 0x100fc000000UL;
+	hwmid = ((upaid & 0x3c) << 1) |
+		((upaid & 0x40) >> 4) |
+		(upaid & 0x3);
+	p->hwmid = hwmid;
+	treg_base += (hwmid << 33UL);
+	treg_base += 0x200UL;
+	for (i = 0; i < 32; i++) {
+		p->imap_slots[i] = 0UL;
+		p->tregs[i] = treg_base + (i * 0x10UL);
+		/* Lets play it safe and not overwrite existing mappings */
+		if (upa_readl(p->tregs[i]) != 0)
+			p->imap_slots[i] = 0xdeadbeaf;	/* poison: marks slot busy; never matches a real imap */
+	}
+	p->upaid = upaid;
+	p->next = sflist;
+	sflist = p;
+
+	return (void *) p;
+}
+
+/* Route the interrupt described by 'imap' to CPU 'upaid' by
+ * programming one of the owning board's 32 translation registers.
+ * Returns the index of the register used; halts/panics if the board
+ * is unknown or all 32 slots are taken.
+ */
+unsigned int starfire_translate(unsigned long imap,
+				unsigned int upaid)
+{
+	struct starfire_irqinfo *p;
+	unsigned int bus_hwmid;
+	unsigned int i;
+
+	/* The board's hwmid sits in bits 33..39 of the imap physical
+	 * address, mirroring the treg_base computation in
+	 * starfire_hookup().
+	 */
+	bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
+	for (p = sflist; p != NULL; p = p->next)
+		if (p->hwmid == bus_hwmid)
+			break;
+	if (p == NULL) {
+		prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
+			    ((unsigned long)imap));
+		prom_halt();
+	}
+	/* Reuse the slot already holding this imap, else grab the
+	 * first free one (entry == 0).
+	 */
+	for (i = 0; i < 32; i++) {
+		if (p->imap_slots[i] == imap ||
+		    p->imap_slots[i] == 0UL)
+			break;
+	}
+	if (i == 32) {
+		printk("starfire_translate: Are you kidding me?\n");
+		panic("Lucy in the sky....");
+	}
+	p->imap_slots[i] = imap;
+
+	/* map to real upaid */
+	upaid = (((upaid & 0x3c) << 1) |
+		 ((upaid & 0x40) >> 4) |
+		 (upaid & 0x3));
+
+	upa_writel(upaid, p->tregs[i]);
+
+	return i;
+}
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
new file mode 100644
index 0000000..87c1aeb
--- /dev/null
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -0,0 +1,275 @@
+/* $Id: sunos_ioctl32.c,v 1.11 2000/07/30 23:12:24 davem Exp $
+ * sunos_ioctl32.c: SunOS ioctl compatibility on sparc64.
+ *
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/ioctl.h>
+#include <linux/route.h>
+#include <linux/sockios.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include <asm/kbio.h>
+
+#define SUNOS_NR_OPEN	256
+
+/* 32-bit layout of struct rtentry, used by the SIOCADDRT/SIOCDELRT
+ * translations below (pointers and longs shrink to u32).
+ */
+struct rtentry32 {
+        u32   		rt_pad1;
+        struct sockaddr rt_dst;         /* target address               */
+        struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
+        struct sockaddr rt_genmask;     /* target network mask (IP)     */
+        unsigned short  rt_flags;
+        short           rt_pad2;
+        u32   		rt_pad3;
+        unsigned char   rt_tos;
+        unsigned char   rt_class;
+        short           rt_pad4;
+        short           rt_metric;      /* +1 for binary compatibility! */
+        /* char * */ u32 rt_dev;        /* forcing the device at add    */
+        u32   		rt_mtu;         /* per route MTU/Window         */
+        u32   		rt_window;      /* Window clamping              */
+        unsigned short  rt_irtt;        /* Initial RTT                  */
+
+};
+
+/* 32-bit layout of struct ifmap: mem_start/mem_end shrink to u32. */
+struct ifmap32 {
+	u32 mem_start;
+	u32 mem_end;
+	unsigned short base_addr;
+	unsigned char irq;
+	unsigned char dma;
+	unsigned char port;
+};
+
+/* 32-bit layout of struct ifreq; ifru_data is a 32-bit user pointer
+ * (compat_caddr_t) and ifru_map uses the shrunken ifmap32 above.
+ */
+struct ifreq32 {
+#define IFHWADDRLEN     6
+#define IFNAMSIZ        16
+        union {
+                char    ifrn_name[IFNAMSIZ];            /* if name, e.g. "en0" */
+        } ifr_ifrn;
+        union {
+                struct  sockaddr ifru_addr;
+                struct  sockaddr ifru_dstaddr;
+                struct  sockaddr ifru_broadaddr;
+                struct  sockaddr ifru_netmask;
+                struct  sockaddr ifru_hwaddr;
+                short   ifru_flags;
+                int     ifru_ivalue;
+                int     ifru_mtu;
+                struct  ifmap32 ifru_map;
+                char    ifru_slave[IFNAMSIZ];   /* Just fits the size */
+                compat_caddr_t ifru_data;
+        } ifr_ifru;
+};
+
+/* 32-bit layout of struct ifconf: the buffer pointer is a compat_caddr_t. */
+struct ifconf32 {
+        int     ifc_len;                        /* size of buffer       */
+        compat_caddr_t  ifcbuf;
+};
+
+extern asmlinkage int compat_sys_ioctl(unsigned int, unsigned int, u32);
+
+/* SunOS ioctl(2) entry point for 32-bit compat tasks: translate SunOS
+ * request numbers to their Linux equivalents and forward them, mostly
+ * to compat_sys_ioctl().  Anything unrecognized is passed through
+ * unchanged, with -EINVAL rewritten to -EOPNOTSUPP, which is what
+ * SunOS programs expect for unsupported requests.
+ */
+asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
+{
+	int ret = -EBADF;
+
+	/* fds beyond SunOS's 256-entry limit (SUNOS_NR_OPEN), or not
+	 * open, fail with EBADF before any translation.
+	 */
+	if(fd >= SUNOS_NR_OPEN)
+		goto out;
+	if(!fcheck(fd))
+		goto out;
+
+	if(cmd == TIOCSETD) {
+		mm_segment_t old_fs = get_fs();
+		int __user *p;
+		int ntty = N_TTY;
+		int tmp;
+
+		p = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(tmp, p))
+			goto out;
+		if(tmp == 2) {
+			/* SunOS line discipline 2 is mapped to Linux
+			 * N_TTY; pass the translated value from a
+			 * kernel buffer under KERNEL_DS.
+			 */
+			set_fs(KERNEL_DS);
+			ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
+			set_fs(old_fs);
+			ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+			goto out;
+		}
+	}
+	if(cmd == TIOCNOTTY) {
+		/* Detaching from the controlling tty is implemented by
+		 * starting a new session.
+		 */
+		ret = sys_setsid();
+		goto out;
+	}
+	switch(cmd) {
+	case _IOW('r', 10, struct rtentry32):
+		ret = compat_sys_ioctl(fd, SIOCADDRT, arg);
+		goto out;
+	case _IOW('r', 11, struct rtentry32):
+		ret = compat_sys_ioctl(fd, SIOCDELRT, arg);
+		goto out;
+
+	case _IOW('i', 12, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFADDR, arg);
+		goto out;
+	case _IOWR('i', 13, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFADDR, arg);
+		goto out;
+	case _IOW('i', 14, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
+		goto out;
+	case _IOWR('i', 15, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
+		goto out;
+	case _IOW('i', 16, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
+		goto out;
+	case _IOWR('i', 17, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
+		goto out;
+	case _IOW('i', 18, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFMEM, arg);
+		goto out;
+	case _IOWR('i', 19, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFMEM, arg);
+		goto out;
+
+	case _IOWR('i', 20, struct ifconf32):
+		ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg);
+		goto out;
+
+	/* The MTU requests use the native struct ifreq layout, hence
+	 * plain sys_ioctl() rather than the compat translation.
+	 */
+	case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
+		ret = sys_ioctl(fd, SIOCSIFMTU, arg);
+		goto out;
+	case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
+		ret = sys_ioctl(fd, SIOCGIFMTU, arg);
+		goto out;
+
+	case _IOWR('i', 23, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+		goto out;
+	case _IOW('i', 24, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
+		goto out;
+	case _IOWR('i', 25, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
+		goto out;
+	case _IOW('i', 26, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
+		goto out;
+	case _IOWR('i', 27, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
+		goto out;
+	case _IOW('i', 28, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
+		goto out;
+
+	case _IOW('i', 30, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCSARP, arg);
+		goto out;
+	case _IOWR('i', 31, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCGARP, arg);
+		goto out;
+	case _IOW('i', 32, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCDARP, arg);
+		goto out;
+
+	/* SunOS-only interface requests with no Linux equivalent. */
+	case _IOW('i', 40, struct ifreq32): /* SIOCUPPER */
+	case _IOW('i', 41, struct ifreq32): /* SIOCLOWER */
+	case _IOW('i', 44, struct ifreq32): /* SIOCSETSYNC */
+	case _IOW('i', 45, struct ifreq32): /* SIOCGETSYNC */
+	case _IOW('i', 46, struct ifreq32): /* SIOCSSDSTATS */
+	case _IOW('i', 47, struct ifreq32): /* SIOCSSESTATS */
+	case _IOW('i', 48, struct ifreq32): /* SIOCSPROMISC */
+		ret = -EOPNOTSUPP;
+		goto out;
+
+	case _IOW('i', 49, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCADDMULTI, arg);
+		goto out;
+	case _IOW('i', 50, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCDELMULTI, arg);
+		goto out;
+
+	/* FDDI interface ioctls, unsupported. */
+		
+	case _IOW('i', 51, struct ifreq32): /* SIOCFDRESET */
+	case _IOW('i', 52, struct ifreq32): /* SIOCFDSLEEP */
+	case _IOW('i', 53, struct ifreq32): /* SIOCSTRTFMWAR */
+	case _IOW('i', 54, struct ifreq32): /* SIOCLDNSTRTFW */
+	case _IOW('i', 55, struct ifreq32): /* SIOCGETFDSTAT */
+	case _IOW('i', 56, struct ifreq32): /* SIOCFDNMIINT */
+	case _IOW('i', 57, struct ifreq32): /* SIOCFDEXUSER */
+	case _IOW('i', 58, struct ifreq32): /* SIOCFDGNETMAP */
+	case _IOW('i', 59, struct ifreq32): /* SIOCFDGIOCTL */
+		printk("FDDI ioctl, returning EOPNOTSUPP\n");
+		ret = -EOPNOTSUPP;
+		goto out;
+
+	case _IOW('t', 125, int):
+		/* More stupid tty sunos ioctls, just
+		 * say it worked.
+		 */
+		ret = 0;
+		goto out;
+
+	/* Non posix grp */
+	case _IOW('t', 118, int): {
+		int oldval, newval, __user *ptr;
+
+		cmd = TIOCSPGRP;
+		ptr = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(oldval, ptr))
+			goto out;
+		ret = compat_sys_ioctl(fd, cmd, arg);
+		/* Probe the value the ioctl left behind; the address
+		 * was validated by the get_user() above, so the
+		 * __get_user() result is deliberately ignored.  A -1
+		 * result (and ENOTTY) is reported as EIO, restoring
+		 * the previous value.
+		 */
+		__get_user(newval, ptr);
+		if(newval == -1) {
+			__put_user(oldval, ptr);
+			ret = -EIO;
+		}
+		if(ret == -ENOTTY)
+			ret = -EIO;
+		goto out;
+	}
+
+	case _IOR('t', 119, int): {
+		int oldval, newval, __user *ptr;
+
+		cmd = TIOCGPGRP;
+		ptr = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(oldval, ptr))
+			goto out;
+		ret = compat_sys_ioctl(fd, cmd, arg);
+		/* Same -1/ENOTTY -> EIO mapping as TIOCSPGRP above. */
+		__get_user(newval, ptr);
+		if(newval == -1) {
+			__put_user(oldval, ptr);
+			ret = -EIO;
+		}
+		if(ret == -ENOTTY)
+			ret = -EIO;
+		goto out;
+	}
+	};
+
+	/* Everything else: forward untranslated. */
+	ret = compat_sys_ioctl(fd, cmd, arg);
+	/* so stupid... */
+	ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+out:
+	return ret;
+}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
new file mode 100644
index 0000000..5a95e98
--- /dev/null
+++ b/arch/sparc64/kernel/sys32.S
@@ -0,0 +1,327 @@
+/* $Id: sys32.S,v 1.12 2000/03/24 04:17:37 davem Exp $
+ * sys32.S: I-cache tricks for 32-bit compatibility layer simple
+ *          conversions.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/errno.h>
+
+/* NOTE: call as jump breaks return stack, we have to avoid that */
+
+	.text
+
+/* SIGNn(STUB, SYSCALL, REGS...): emit a stub that sign-extends the
+ * listed 32-bit argument registers to 64 bits ("sra reg, 0, reg")
+ * and tail-calls SYSCALL; the final sra executes in the jmpl delay
+ * slot.  Each stub is 32-byte aligned so it stays within a single
+ * I-cache line.
+ */
+#define SIGN1(STUB,SYSCALL,REG1) \
+	.align	32; \
+	.globl	STUB; \
+STUB:	sethi	%hi(SYSCALL), %g1; \
+	jmpl	%g1 + %lo(SYSCALL), %g0; \
+	sra	REG1, 0, REG1
+
+#define SIGN2(STUB,SYSCALL,REG1,REG2) \
+	.align	32; \
+	.globl	STUB; \
+STUB:	sethi	%hi(SYSCALL), %g1; \
+	sra	REG1, 0, REG1; \
+	jmpl	%g1 + %lo(SYSCALL), %g0; \
+	sra	REG2, 0, REG2
+
+#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
+	.align	32; \
+	.globl	STUB; \
+STUB:	sra	REG1, 0, REG1; \
+	sethi	%hi(SYSCALL), %g1; \
+	sra	REG2, 0, REG2; \
+	jmpl	%g1 + %lo(SYSCALL), %g0; \
+	sra	REG3, 0, REG3
+
+#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
+	.align	32; \
+	.globl	STUB; \
+STUB:	sra	REG1, 0, REG1; \
+	sethi	%hi(SYSCALL), %g1; \
+	sra	REG2, 0, REG2; \
+	sra	REG3, 0, REG3; \
+	jmpl	%g1 + %lo(SYSCALL), %g0; \
+	sra	REG4, 0, REG4
+
+/* 32-bit syscall entry stubs: each sign-extends the listed argument
+ * registers and tail-calls the 64-bit (or compat_) implementation.
+ */
+SIGN1(sys32_exit, sparc_exit, %o0)
+SIGN1(sys32_exit_group, sys_exit_group, %o0)
+SIGN1(sys32_wait4, compat_sys_wait4, %o2)
+SIGN1(sys32_creat, sys_creat, %o1)
+SIGN1(sys32_mknod, sys_mknod, %o1)
+SIGN1(sys32_perfctr, sys_perfctr, %o0)
+SIGN1(sys32_umount, sys_umount, %o1)
+SIGN1(sys32_signal, sys_signal, %o0)
+SIGN1(sys32_access, sys_access, %o1)
+SIGN1(sys32_msync, sys_msync, %o2)
+SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
+SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
+SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
+SIGN1(sys32_sethostname, sys_sethostname, %o1)
+SIGN1(sys32_swapon, sys_swapon, %o1)
+SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
+SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
+SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
+SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
+SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
+SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
+SIGN1(sys32_setxattr, sys_setxattr, %o4)
+SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
+SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
+SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
+SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
+SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
+SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
+SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
+SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
+SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
+SIGN1(sys32_readahead, compat_sys_readahead, %o0)
+SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
+SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
+SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
+SIGN1(sys32_mlockall, sys_mlockall, %o0)
+SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
+SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
+SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
+SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
+SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
+SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
+SIGN1(sys32_select, compat_sys_select, %o0)
+SIGN1(sys32_mkdir, sys_mkdir, %o1)
+SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
+SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3)
+SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
+SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
+SIGN1(sys32_prctl, sys_prctl, %o0)
+SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
+SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
+SIGN1(sys32_getgroups, sys_getgroups, %o0)
+SIGN1(sys32_getpgid, sys_getpgid, %o0)
+SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
+SIGN1(sys32_getsid, sys_getsid, %o0)
+SIGN2(sys32_kill, sys_kill, %o0, %o1)
+SIGN1(sys32_nice, sys_nice, %o0)
+SIGN1(sys32_lseek, sys_lseek, %o1)
+SIGN2(sys32_open, sparc32_open, %o1, %o2)
+SIGN1(sys32_readlink, sys_readlink, %o2)
+SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
+SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
+SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
+SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
+SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
+SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
+SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
+SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
+SIGN1(sys32_setgroups, sys_setgroups, %o0)
+SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
+SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
+SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
+SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
+SIGN1(sys32_umask, sys_umask, %o0)
+SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
+SIGN1(sys32_sendto, sys_sendto, %o0)
+SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
+SIGN2(sys32_connect, sys_connect, %o0, %o2)
+SIGN2(sys32_bind, sys_bind, %o0, %o2)
+SIGN2(sys32_listen, sys_listen, %o0, %o1)
+SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
+SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
+SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
+SIGN1(sys32_getpeername, sys_getpeername, %o0)
+SIGN1(sys32_getsockname, sys_getsockname, %o0)
+
+	/* mmap2 takes its offset in 4096-byte units: convert it to a
+	 * byte offset (<< 12) in the delay slot and tail-call sys_mmap.
+	 */
+	.globl		sys32_mmap2
+sys32_mmap2:
+	sethi		%hi(sys_mmap), %g1
+	jmpl		%g1 + %lo(sys_mmap), %g0
+	 sllx		%o5, 12, %o5
+
+	/* Bounds-check the socketcall number (valid range 1..17),
+	 * then jump into the 32-byte-per-entry handler table below
+	 * ((call - 1) << 5 indexes __socketcall_table_begin).
+	 */
+	.align		32
+	.globl		sys32_socketcall
+sys32_socketcall:	/* %o0=call, %o1=args */
+	cmp		%o0, 1
+	bl,pn		%xcc, do_einval
+	 cmp		%o0, 17
+	bg,pn		%xcc, do_einval
+	 sub		%o0, 1, %o0
+	sllx		%o0, 5, %o0
+	sethi		%hi(__socketcall_table_begin), %g2
+	or		%g2, %lo(__socketcall_table_begin), %g2
+	jmpl		%g2 + %o0, %g0
+	 nop
+
+	/* Each entry is exactly 32 bytes. */
+	/* (8 instructions, nop-padded, so sys32_socketcall can index
+	 * the table with call_nr << 5.  The entries load the 32-bit
+	 * argument block through %asi: ldswa sign-extends ints,
+	 * lduwa zero-extends pointers/unsigned values.  The load
+	 * following each jmpl executes in its delay slot.)
+	 */
+	.align		32
+__socketcall_table_begin:
+do_sys_socket: /* sys_socket(int, int, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_socket), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_socket), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_bind), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_bind), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_connect), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_connect), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_listen: /* sys_listen(int, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_listen), %g1
+	jmpl		%g1 + %lo(sys_listen), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+	nop
+do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_accept), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_accept), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_getsockname), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_getsockname), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_getpeername), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(sys_getpeername), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_socketpair), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	jmpl		%g1 + %lo(sys_socketpair), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_send), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	jmpl		%g1 + %lo(sys_send), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_recv), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	jmpl		%g1 + %lo(sys_recv), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_sendto), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	lduwa		[%o1 + 0x10] %asi, %o4
+	ldswa		[%o1 + 0x14] %asi, %o5
+	jmpl		%g1 + %lo(sys_sendto), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_recvfrom), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	lduwa		[%o1 + 0x10] %asi, %o4
+	lduwa		[%o1 + 0x14] %asi, %o5
+	jmpl		%g1 + %lo(sys_recvfrom), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+do_sys_shutdown: /* sys_shutdown(int, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(sys_shutdown), %g1
+	jmpl		%g1 + %lo(sys_shutdown), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+	nop
+do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(compat_sys_setsockopt), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	ldswa		[%o1 + 0x10] %asi, %o4
+	jmpl		%g1 + %lo(compat_sys_setsockopt), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(compat_sys_getsockopt), %g1
+	ldswa		[%o1 + 0x8] %asi, %o2
+	lduwa		[%o1 + 0xc] %asi, %o3
+	lduwa		[%o1 + 0x10] %asi, %o4
+	jmpl		%g1 + %lo(compat_sys_getsockopt), %g0
+	 ldswa		[%o1 + 0x4] %asi, %o1
+	nop
+do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(compat_sys_sendmsg), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(compat_sys_sendmsg), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
+	ldswa		[%o1 + 0x0] %asi, %o0
+	sethi		%hi(compat_sys_recvmsg), %g1
+	lduwa		[%o1 + 0x8] %asi, %o2
+	jmpl		%g1 + %lo(compat_sys_recvmsg), %g0
+	 lduwa		[%o1 + 0x4] %asi, %o1
+	nop
+	nop
+	nop
+__socketcall_table_end:
+
+	/* Common error returns; the mov executes in retl's delay slot. */
+do_einval:
+	retl
+	 mov		-EINVAL, %o0
+do_efault:
+	retl
+	 mov		-EFAULT, %o0
+
+	/* Range exception-table entry: any user-access fault taken
+	 * between the table begin/end labels is fixed up by jumping
+	 * to do_efault.
+	 */
+	.section	__ex_table
+	.align		4
+	.word		__socketcall_table_begin, 0, __socketcall_table_end, do_efault
+	.previous
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
new file mode 100644
index 0000000..0077f02
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -0,0 +1,723 @@
+/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
+ * linux/arch/sparc64/kernel/sys_sparc.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/sparc
+ * platform.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/utsname.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/ipc.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+#include <asm/utrap.h>
+#include <asm/perfctr.h>
+
+/* #define DEBUG_UNIMP_SYSCALL */
+
+/* XXX Make this per-binary type, this way we can detect the type of
+ * XXX a binary.  Every Sparc executable calls this very early on.
+ */
+/* Return the kernel's base page size (selected at build time via the
+ * SPARC64_PAGE_SIZE_* Kconfig options).
+ */
+asmlinkage unsigned long sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
+
+/* Round 'addr' up to an SHMLBA boundary, then add the sub-SHMLBA
+ * colour of 'pgoff', so that mapping address and file offset agree
+ * modulo SHMLBA (avoids cache-alias conflicts between mappings of
+ * the same page).
+ */
+#define COLOUR_ALIGN(addr,pgoff)		\
+	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
+	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+/* Pick an unmapped virtual range of 'len' bytes for a new mapping.
+ * File-backed and shared mappings are colour-aligned (COLOUR_ALIGN)
+ * to honour cache aliasing constraints.  32-bit tasks are limited
+ * to a 0xf0000000 task size, and the search skips the unmappable
+ * hole between -PAGE_OFFSET and PAGE_OFFSET.  Returns the chosen
+ * address or -ENOMEM/-EINVAL.
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct * vma;
+	unsigned long task_size = TASK_SIZE;
+	unsigned long start_addr;
+	int do_color_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = 0xf0000000UL;
+	if (len > task_size || len > -PAGE_OFFSET)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+
+	/* Honour a caller-supplied hint if the resulting (aligned)
+	 * range is free.
+	 */
+	if (addr) {
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	start_addr = addr = mm->free_area_cache;
+
+	task_size -= len;
+
+full_search:
+	if (do_color_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
+			/* Jump over the VA hole ending at PAGE_OFFSET. */
+			addr = PAGE_OFFSET;
+			vma = find_vma(mm, PAGE_OFFSET);
+		}
+		if (task_size < addr) {
+			/* Exhausted from the cached hint; retry once
+			 * from the bottom of the mmap area.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (!vma || addr + len <= vma->vm_start) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		addr = vma->vm_end;
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
+
+/* Try to align mapping such that we align it as much as possible. */
+/* Over-allocate len + (align_goal - PAGE_SIZE) so the result can be
+ * rounded up to align_goal, trying 4MB, then 512KB, then 64KB before
+ * settling for page alignment; fall back to a plain search if no
+ * aligned area could be obtained.
+ * NOTE(review): MAP_SHARED is cleared before searching — presumably
+ * to keep arch_get_unmapped_area()'s colour alignment from fighting
+ * the explicit alignment done here; confirm.
+ */
+unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	unsigned long align_goal, addr = -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		/* Ok, don't mess with it. */
+		return get_unmapped_area(NULL, addr, len, pgoff, flags);
+	}
+	flags &= ~MAP_SHARED;
+
+	align_goal = PAGE_SIZE;
+	if (len >= (4UL * 1024 * 1024))
+		align_goal = (4UL * 1024 * 1024);
+	else if (len >= (512UL * 1024))
+		align_goal = (512UL * 1024);
+	else if (len >= (64UL * 1024))
+		align_goal = (64UL * 1024);
+
+	do {
+		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+		if (!(addr & ~PAGE_MASK)) {
+			/* Success: round up inside the padded area. */
+			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
+			break;
+		}
+
+		if (align_goal == (4UL * 1024 * 1024))
+			align_goal = (512UL * 1024);
+		else if (align_goal == (512UL * 1024))
+			align_goal = (64UL * 1024);
+		else
+			align_goal = PAGE_SIZE;
+	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
+
+	/* Mapping is smaller than 64K or larger areas could not
+	 * be obtained.
+	 */
+	if (addr & ~PAGE_MASK)
+		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+
+	return addr;
+}
+
+/* brk(2) wrapper.  Refuse (by returning the current break unchanged)
+ * a new break a 32-bit task could not address (>= 0xf0000000), or one
+ * that would cross the PAGE_OFFSET boundary.
+ */
+asmlinkage unsigned long sparc_brk(unsigned long brk)
+{
+	/* People could try to be nasty and use ta 0x6d in 32bit programs */
+	if (test_thread_flag(TIF_32BIT) &&
+	    brk >= 0xf0000000UL)
+		return current->mm->brk;
+
+	/* Old and new break must lie on the same side of PAGE_OFFSET. */
+	if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
+		return current->mm->brk;
+	return sys_brk(brk);
+}
+                                                                
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+/* The second fd is returned in register %i1 (u_regs[UREG_I1]); the
+ * syscall's normal return value carries fd[0], or a negative errno.
+ */
+asmlinkage long sparc_pipe(struct pt_regs *regs)
+{
+	int fd[2];
+	int error;
+
+	error = do_pipe(fd);
+	if (error)
+		goto out;
+	regs->u_regs[UREG_I1] = fd[1];
+	error = fd[0];
+out:
+	return error;
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+
+/* Dispatch on 'call': SEM* (call <= SEMCTL), MSG* (<= MSGCTL), then
+ * SHM* (<= SHMCTL).  The *CTL variants force the IPC_64 structure
+ * layout; anything else returns -ENOSYS.
+ */
+asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+			unsigned long third, void __user *ptr, long fifth)
+{
+	int err;
+
+	/* No need for backward compatibility. We can start fresh... */
+	if (call <= SEMCTL) {
+		switch (call) {
+		case SEMOP:
+			/* Plain semop is semtimedop without a timeout. */
+			err = sys_semtimedop(first, ptr,
+					     (unsigned)second, NULL);
+			goto out;
+		case SEMTIMEDOP:
+			err = sys_semtimedop(first, ptr, (unsigned)second,
+				(const struct timespec __user *) fifth);
+			goto out;
+		case SEMGET:
+			err = sys_semget(first, (int)second, (int)third);
+			goto out;
+		case SEMCTL: {
+			union semun fourth;
+			err = -EINVAL;
+			if (!ptr)
+				goto out;
+			err = -EFAULT;
+			/* The semctl argument arrives as a pointer
+			 * stored in user memory at 'ptr'.
+			 */
+			if (get_user(fourth.__pad,
+				     (void __user * __user *) ptr))
+				goto out;
+			err = sys_semctl(first, (int)second | IPC_64,
+					 (int)third, fourth);
+			goto out;
+		}
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	}
+	if (call <= MSGCTL) {
+		switch (call) {
+		case MSGSND:
+			err = sys_msgsnd(first, ptr, (size_t)second,
+					 (int)third);
+			goto out;
+		case MSGRCV:
+			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+					 (int)third);
+			goto out;
+		case MSGGET:
+			err = sys_msgget((key_t)first, (int)second);
+			goto out;
+		case MSGCTL:
+			err = sys_msgctl(first, (int)second | IPC_64, ptr);
+			goto out;
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	}
+	if (call <= SHMCTL) {
+		switch (call) {
+		case SHMAT: {
+			ulong raddr;
+			err = do_shmat(first, ptr, (int)second, &raddr);
+			if (!err) {
+				/* Attach address goes back through the
+				 * user pointer passed in 'third'.
+				 */
+				if (put_user(raddr,
+					     (ulong __user *) third))
+					err = -EFAULT;
+			}
+			goto out;
+		}
+		case SHMDT:
+			err = sys_shmdt(ptr);
+			goto out;
+		case SHMGET:
+			err = sys_shmget(first, (size_t)second, (int)third);
+			goto out;
+		case SHMCTL:
+			err = sys_shmctl(first, (int)second | IPC_64, ptr);
+			goto out;
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	} else {
+		err = -ENOSYS;
+	}
+out:
+	return err;
+}
+
+/* uname(2): after the generic sys_newuname(), a task running with
+ * the PER_LINUX32 personality gets "sparc" written into
+ * utsname.machine in place of the 64-bit machine string.
+ */
+asmlinkage long sparc64_newuname(struct new_utsname __user *name)
+{
+	int ret = sys_newuname(name);
+	
+	if (current->personality == PER_LINUX32 && !ret) {
+		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
+		       ? -EFAULT : 0);
+	}
+	return ret;
+}
+
+/* personality(2): a PER_LINUX32 task asking for PER_LINUX is kept at
+ * PER_LINUX32, and a PER_LINUX32 result is reported back to the
+ * caller as PER_LINUX.
+ */
+asmlinkage long sparc64_personality(unsigned long personality)
+{
+	int ret;
+
+	if (current->personality == PER_LINUX32 &&
+	    personality == PER_LINUX)
+		personality = PER_LINUX32;
+	ret = sys_personality(personality);
+	if (ret == PER_LINUX32)
+		ret = PER_LINUX;
+
+	return ret;
+}
+
+/* Linux version of mmap */
+/* mmap(2) entry point.  Beyond do_mmap(), enforces the sparc64
+ * address-space limits: 32-bit tasks may not map at/above 0xf0000000,
+ * and 64-bit tasks may not place a fixed mapping into the VA hole
+ * below PAGE_OFFSET.  Returns the mapped address or a negative errno.
+ */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags, unsigned long fd,
+	unsigned long off)
+{
+	struct file * file = NULL;
+	unsigned long retval = -EBADF;
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+	/* These two flags are ignored when coming from userspace. */
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	len = PAGE_ALIGN(len);
+	retval = -EINVAL;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		if (len > 0xf0000000UL ||
+		    ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
+			goto out_putf;
+	} else {
+		if (len > -PAGE_OFFSET ||
+		    ((flags & MAP_FIXED) &&
+		     addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+			goto out_putf;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(file, addr, len, prot, flags, off);
+	up_write(&current->mm->mmap_sem);
+
+out_putf:
+	if (file)
+		fput(file);
+out:
+	return retval;
+}
+
+/* munmap(2): reject lengths or ranges reaching into the VA hole
+ * (between -PAGE_OFFSET and PAGE_OFFSET), then defer to do_munmap()
+ * under mmap_sem.
+ */
+asmlinkage long sys64_munmap(unsigned long addr, size_t len)
+{
+	long ret;
+
+	if (len > -PAGE_OFFSET ||
+	    (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+		return -EINVAL;
+	down_write(&current->mm->mmap_sem);
+	ret = do_munmap(current->mm, addr, len);
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+extern unsigned long do_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr);
+                
+/* mremap(2) for 64-bit tasks; 32-bit tasks get -EINVAL here.  Both
+ * old and new ranges must stay out of the VA hole between
+ * -PAGE_OFFSET and PAGE_OFFSET.  When an in-place grow would reach
+ * into the hole and MREMAP_MAYMOVE is allowed, pick a fresh area
+ * ourselves and force MREMAP_FIXED before calling do_mremap().
+ */
+asmlinkage unsigned long sys64_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr)
+{
+	struct vm_area_struct *vma;
+	unsigned long ret = -EINVAL;
+	if (test_thread_flag(TIF_32BIT))
+		goto out;
+	if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
+		goto out;
+	if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
+		goto out;
+	down_write(&current->mm->mmap_sem);
+	if (flags & MREMAP_FIXED) {
+		if (new_addr < PAGE_OFFSET &&
+		    new_addr + new_len > -PAGE_OFFSET)
+			goto out_sem;
+	} else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
+		unsigned long map_flags = 0;
+		struct file *file = NULL;
+
+		ret = -ENOMEM;
+		if (!(flags & MREMAP_MAYMOVE))
+			goto out_sem;
+
+		/* Search with the old vma's file/shared attributes so
+		 * the new area satisfies the same constraints.
+		 */
+		vma = find_vma(current->mm, addr);
+		if (vma) {
+			if (vma->vm_flags & VM_SHARED)
+				map_flags |= MAP_SHARED;
+			file = vma->vm_file;
+		}
+
+		/* MREMAP_FIXED checked above. */
+		new_addr = get_unmapped_area(file, addr, new_len,
+				    vma ? vma->vm_pgoff : 0,
+				    map_flags);
+		ret = new_addr;
+		if (new_addr & ~PAGE_MASK)
+			goto out_sem;
+		flags |= MREMAP_FIXED;
+	}
+	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+out_sem:
+	up_write(&current->mm->mmap_sem);
+out:
+	return ret;       
+}
+
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	/* u_regs[1] is %g1, which holds the syscall number on sparc. */
	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}
+
/* #define DEBUG_SPARC_BREAKPOINT */

/* Breakpoint trap handler: deliver SIGTRAP/TRAP_BRKPT to the current
 * task with the faulting PC as the signal address.
 */
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	/* 32-bit tasks only have a 32-bit PC; strip any stale high bits. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}
+
+extern void check_pending(int signum);
+
+asmlinkage long sys_getdomainname(char __user *name, int len)
+{
+        int nlen;
+	int err = -EFAULT;
+
+ 	down_read(&uts_sem);
+ 	
+	nlen = strlen(system_utsname.domainname) + 1;
+
+        if (nlen < len)
+                len = nlen;
+	if (len > __NEW_UTS_LEN)
+		goto done;
+	if (copy_to_user(name, system_utsname.domainname, len))
+		goto done;
+	err = 0;
+done:
+	up_read(&uts_sem);
+	return err;
+}
+
/* Trap slot for Solaris syscalls when the solaris emulation module is
 * not loaded: skip the trap instruction, complain (rate-limited), and
 * kill the caller with SIGSEGV.
 */
asmlinkage long solaris_syscall(struct pt_regs *regs)
{
	static int count;

	/* Advance past the trap instruction. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* Log at most 5 times so a looping binary can't spam the console. */
	if (++count <= 5) {
		printk ("For Solaris binary emulation you need solaris module loaded\n");
		show_regs (regs);
	}
	send_sig(SIGSEGV, current, 1);

	return -ENOSYS;
}
+
+#ifndef CONFIG_SUNOS_EMUL
/* Fallback for SunOS syscalls when CONFIG_SUNOS_EMUL is off: skip the
 * trap instruction, complain (rate-limited), and raise SIGSEGV.
 */
asmlinkage long sunos_syscall(struct pt_regs *regs)
{
	static int count;

	/* Advance past the trap instruction. */
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	if (++count <= 20)
		printk ("SunOS binary emulation not compiled in\n");
	force_sig(SIGSEGV, current);

	return -ENOSYS;
}
+#endif
+
/* Install or query a userspace trap handler (SPARC V9 utraps).
 *
 * utraps[0] is a reference count shared across clone()d threads;
 * utraps[1..UT_TRAP_INSTRUCTION_31] hold the per-type handler
 * addresses.  new_p == UTH_NOCHANGE means "query only": report the
 * current precise handler via old_p without modifying anything.
 * Deferred handlers (new_d/old_d) are not supported and always read
 * back as NULL.
 */
asmlinkage long sys_utrap_install(utrap_entry_t type,
				  utrap_handler_t new_p,
				  utrap_handler_t new_d,
				  utrap_handler_t __user *old_p,
				  utrap_handler_t __user *old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				/* No table allocated yet: report NULL. */
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		/* First handler for this thread group: allocate a fresh
		 * table with refcount 1 and all handlers cleared.
		 */
		current_thread_info()->utraps =
			kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
		memset(current_thread_info()->utraps+1, 0,
		       UT_TRAP_INSTRUCTION_31*sizeof(long));
	} else {
		/* Copy-on-write: if the table is shared (refcount > 1) and
		 * we are about to change an entry, clone it first so other
		 * sharers keep their view.
		 */
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				/* Allocation failed: keep the shared table. */
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	/* Report the previous handler before overwriting it. */
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
+
/* Select the SPARC V9 memory model for this task (0=TSO, 1=PSO, 2=RMO)
 * by rewriting the MM field of TSTATE (bits 15:14, TSTATE_MM).
 */
long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
+
/* sparc64 rt_sigaction: like the generic one, but takes an extra
 * 'restorer' argument that is stashed in k_sigaction for the signal
 * return trampoline.
 */
asmlinkage long sys_rt_sigaction(int sig,
				 const struct sigaction __user *act,
				 struct sigaction __user *oact,
				 void __user *restorer,
				 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
+
/* Invoked by rtrap code to update performance counters in
 * user space.
 *
 * Accumulates the low (cntd0) and high (cntd1) halves of the %pic
 * register into the kernel shadow counters, mirrors them to the
 * user-supplied addresses, then clears %pic.
 * NOTE(review): the __put_user() results are deliberately ignored here
 * (no way to report a fault from the return path); PERFCTR_READ below
 * does check them.
 */
asmlinkage void update_perfctrs(void)
{
	unsigned long pic, tmp;

	read_pic(pic);
	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
	__put_user(tmp, current_thread_info()->user_cntd0);
	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
	__put_user(tmp, current_thread_info()->user_cntd1);
	reset_pic();
}
+
/* Userspace interface to the UltraSPARC performance counters.
 *
 * opcode selects the operation; arg0-arg2 are interpreted per opcode
 * (user counter addresses, PCR value or user PCR pointer).  All
 * operations except PERFCTR_ON require counting to already be enabled
 * (TIF_PERFCTR set) and return -EINVAL otherwise.
 */
asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	int err = 0;

	switch(opcode) {
	case PERFCTR_ON:
		/* arg0/arg1: user addresses for counter 0/1 mirrors,
		 * arg2: PCR value to program.
		 */
		current_thread_info()->pcr_reg = arg2;
		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		write_pcr(arg2);
		reset_pic();
		set_thread_flag(TIF_PERFCTR);
		break;

	case PERFCTR_OFF:
		err = -EINVAL;
		if (test_thread_flag(TIF_PERFCTR)) {
			current_thread_info()->user_cntd0 =
				current_thread_info()->user_cntd1 = NULL;
			current_thread_info()->pcr_reg = 0;
			write_pcr(0);
			clear_thread_flag(TIF_PERFCTR);
			err = 0;
		}
		break;

	case PERFCTR_READ: {
		/* Fold the current %pic halves into the kernel shadows and
		 * publish them to the registered user addresses.
		 */
		unsigned long pic, tmp;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		read_pic(pic);
		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
		err |= __put_user(tmp, current_thread_info()->user_cntd0);
		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
		err |= __put_user(tmp, current_thread_info()->user_cntd1);
		reset_pic();
		break;
	}

	case PERFCTR_CLRPIC:
		/* Zero both shadow counters and the hardware %pic. */
		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;

	case PERFCTR_SETPCR: {
		/* arg0: user pointer holding the new PCR value. */
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
		write_pcr(current_thread_info()->pcr_reg);
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;
	}

	case PERFCTR_GETPCR: {
		/* arg0: user pointer to receive the current PCR value. */
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
		break;
	}

	default:
		err = -EINVAL;
		break;
	};
	return err;
}
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
new file mode 100644
index 0000000..567c91c
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -0,0 +1,1118 @@
+/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
+ * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h> 
+#include <linux/mm.h> 
+#include <linux/file.h> 
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/binfmts.h>
+#include <linux/dnotify.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/ptrace.h>
+#include <linux/highuid.h>
+
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/fpumacro.h>
+#include <asm/semaphore.h>
+#include <asm/mmu_context.h>
+
+asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
+{
+	return sys_chown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
+{
+	return sys_lchown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+{
+	return sys_fchown(fd, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+{
+	return sys_setregid(low2highgid(rgid), low2highgid(egid));
+}
+
+asmlinkage long sys32_setgid16(u16 gid)
+{
+	return sys_setgid((gid_t)gid);
+}
+
+asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+{
+	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+}
+
+asmlinkage long sys32_setuid16(u16 uid)
+{
+	return sys_setuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+{
+	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
+		low2highuid(suid));
+}
+
/* getresuid(2) compat entry: report the real/effective/saved uids
 * narrowed to 16 bits.  The short-circuit chain stops (and returns the
 * put_user() error) at the first faulting store.
 */
asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	int retval;

	if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
	    !(retval = put_user(high2lowuid(current->euid), euid)))
		retval = put_user(high2lowuid(current->suid), suid);

	return retval;
}
+
+asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+{
+	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
+		low2highgid(sgid));
+}
+
/* getresgid(2) compat entry: report the real/effective/saved gids
 * narrowed to 16 bits.  The short-circuit chain stops (and returns the
 * put_user() error) at the first faulting store.
 */
asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	int retval;

	if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
	    !(retval = put_user(high2lowgid(current->egid), egid)))
		retval = put_user(high2lowgid(current->sgid), sgid);

	return retval;
}
+
+asmlinkage long sys32_setfsuid16(u16 uid)
+{
+	return sys_setfsuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setfsgid16(u16 gid)
+{
+	return sys_setfsgid((gid_t)gid);
+}
+
+static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
+{
+	int i;
+	u16 group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		group = (u16)GROUP_AT(group_info, i);
+		if (put_user(group, grouplist+i))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
+{
+	int i;
+	u16 group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		if (get_user(group, grouplist+i))
+			return  -EFAULT;
+		GROUP_AT(group_info, i) = (gid_t)group;
+	}
+
+	return 0;
+}
+
/* getgroups(2) compat entry with 16-bit gids.  gidsetsize == 0 means
 * "just report the count".  Returns the number of groups, or a
 * negative errno.
 */
asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	/* Pin the group_info while we read from it. */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}
+
/* setgroups(2) compat entry with 16-bit gids: requires CAP_SETGID,
 * builds a fresh group_info from the user list, and installs it.  The
 * local reference is dropped in all paths.
 */
asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	/* The unsigned compare also rejects negative gidsetsize. */
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
+
+asmlinkage long sys32_getuid16(void)
+{
+	return high2lowuid(current->uid);
+}
+
+asmlinkage long sys32_geteuid16(void)
+{
+	return high2lowuid(current->euid);
+}
+
+asmlinkage long sys32_getgid16(void)
+{
+	return high2lowgid(current->gid);
+}
+
+asmlinkage long sys32_getegid16(void)
+{
+	return high2lowgid(current->egid);
+}
+
+/* 32-bit timeval and related flotsam.  */
+
/* Read a 32-bit timeval from userspace into a native one.  Returns
 * nonzero on fault.  The bitwise '|' (not '||') is deliberate: both
 * fields are fetched after a single access_ok() check.
 */
static long get_tv32(struct timeval *o, struct compat_timeval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->tv_sec, &i->tv_sec) |
		 __get_user(o->tv_usec, &i->tv_usec)));
}
+
/* Write a native timeval to userspace as a 32-bit one.  Returns
 * nonzero on fault; '|' (not '||') is deliberate, see get_tv32().
 */
static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->tv_sec, &o->tv_sec) |
		 __put_user(i->tv_usec, &o->tv_usec)));
}
+
+#ifdef CONFIG_SYSVIPC                                                        
/* Demultiplexer for the 32-bit ipc(2) syscall: decode the packed call
 * number, sign-extend id/key arguments (they were sign-extended to 32
 * bits by the compat layer but must be full ints), and dispatch to the
 * native or compat SysV IPC implementations.
 */
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
	int version;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid */
			return compat_sys_semtimedop((int)first,
						     compat_ptr(ptr), second,
						     compat_ptr(fifth));
		/* else fall through for normal semop() */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign extend semid, semnum */
		return compat_sys_semctl((int)first, (int)second, third,
					 compat_ptr(ptr));

	case MSGSND:
		/* sign extend msqid */
		return compat_sys_msgsnd((int)first, (int)second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		/* sign extend msqid, msgtyp */
		return compat_sys_msgrcv((int)first, second, (int)fifth,
					 third, version, compat_ptr(ptr));
	case MSGGET:
		/* sign extend key */
		return sys_msgget((int)first, second);
	case MSGCTL:
		/* sign extend msqid */
		return compat_sys_msgctl((int)first, second, compat_ptr(ptr));

	case SHMAT:
		/* sign extend shmid */
		return compat_sys_shmat((int)first, second, third, version,
					compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		/* sign extend key_t */
		return sys_shmget((int)first, second, third);
	case SHMCTL:
		/* sign extend shmid */
		return compat_sys_shmctl((int)first, second, compat_ptr(ptr));

	default:
		return -ENOSYS;
	};

	/* Not reached; every case above returns. */
	return -ENOSYS;
}
+#endif
+
+asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_truncate(path, (high << 32) | low);
+}
+
+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_ftruncate(fd, (high << 32) | low);
+}
+
/* Convert a kernel kstat to the 32-bit compat stat layout and copy it
 * to userspace field by field.  Fails with -EOVERFLOW when the size or
 * device numbers do not fit the legacy representation; otherwise
 * returns the OR of the put_user() results (0 on success).
 */
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	int err;

	if (stat->size > MAX_NON_LFS || !old_valid_dev(stat->dev) ||
	    !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	err  = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(stat->ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= put_user(stat->size, &statbuf->st_size);
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(0, &statbuf->__unused1);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(0, &statbuf->__unused2);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(0, &statbuf->__unused3);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	err |= put_user(0, &statbuf->__unused4[0]);
	err |= put_user(0, &statbuf->__unused4[1]);

	return err;
}
+
+asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
+{
+	return sys_sysfs(option, arg1, arg2);
+}
+
/* 32-bit layout of struct sysinfo as seen by compat tasks; must match
 * the 32-bit kernel's definition field for field.
 */
struct sysinfo32 {
        s32 uptime;
        u32 loads[3];
        u32 totalram;
        u32 freeram;
        u32 sharedram;
        u32 bufferram;
        u32 totalswap;
        u32 freeswap;
        unsigned short procs;
	unsigned short pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(int)-sizeof(int)];	/* padding to the 32-bit struct size */
};
+
/* sysinfo(2) compat entry: run the native syscall into a kernel buffer
 * (under KERNEL_DS), rescale memory counters that overflow 32 bits by
 * growing mem_unit, then copy the result out in the 32-bit layout.
 */
asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
{
	struct sysinfo s;
	int ret, err;
	int bitcount = 0;
	mm_segment_t old_fs = get_fs ();

	set_fs(KERNEL_DS);
	ret = sys_sysinfo((struct sysinfo __user *) &s);
	set_fs(old_fs);
	/* Check to see if any memory value is too large for 32-bit and
         * scale down if needed.
         */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}
		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	/* First store uses put_user (validates the pointer); the rest can
	 * use __put_user into the same struct.
	 */
	err = put_user (s.uptime, &info->uptime);
	err |= __put_user (s.loads[0], &info->loads[0]);
	err |= __put_user (s.loads[1], &info->loads[1]);
	err |= __put_user (s.loads[2], &info->loads[2]);
	err |= __put_user (s.totalram, &info->totalram);
	err |= __put_user (s.freeram, &info->freeram);
	err |= __put_user (s.sharedram, &info->sharedram);
	err |= __put_user (s.bufferram, &info->bufferram);
	err |= __put_user (s.totalswap, &info->totalswap);
	err |= __put_user (s.freeswap, &info->freeswap);
	err |= __put_user (s.procs, &info->procs);
	err |= __put_user (s.totalhigh, &info->totalhigh);
	err |= __put_user (s.freehigh, &info->freehigh);
	err |= __put_user (s.mem_unit, &info->mem_unit);
	if (err)
		return -EFAULT;
	return ret;
}
+
/* sched_rr_get_interval(2) compat entry: run the native syscall into a
 * kernel timespec (under KERNEL_DS) and convert it to the 32-bit
 * layout on the way out.
 */
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
+
/* rt_sigprocmask(2) compat entry: repack the 32-bit sigset words into
 * native 64-bit words (two compat words per native word), call the
 * native syscall under KERNEL_DS, and unpack the old mask on the way
 * out.  The switch cases intentionally fall through so exactly
 * _NSIG_WORDS words are converted.
 */
asmlinkage long compat_sys_rt_sigprocmask(int how,
					  compat_sigset_t __user *set,
					  compat_sigset_t __user *oset,
					  compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __user *) &s : NULL,
				 oset ? (sigset_t __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
+
/* rt_sigpending(2) compat entry: fetch the pending set via the native
 * syscall (under KERNEL_DS), then split each 64-bit word into two
 * 32-bit compat words.  Fallthrough switch as in rt_sigprocmask above.
 */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				    compat_size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
+
/* rt_sigqueueinfo(2) compat entry: convert the 32-bit siginfo to the
 * native layout, then invoke the native syscall under KERNEL_DS.
 */
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
					   struct compat_siginfo __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
+
/* Old-style sigaction(2) compat entry.  A negative sig is the sparc
 * convention for requesting "new" signal semantics (sets
 * TIF_NEWSIGNALS) with the sign stripped.  Handler/restorer pointers
 * are 32-bit values widened via compat_ptr().
 */
asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
				     struct old_sigaction32 __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

	if (sig < 0) {
		set_thread_flag(TIF_NEWSIGNALS);
		sig = -sig;
	}

        if (act) {
		compat_old_sigset_t mask;
		u32 u_handler, u_restorer;

		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler =  compat_ptr(u_handler);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(mask, &act->sa_mask);
		if (ret)
			return ret;
		/* Old-style callers provide no separate restorer trampoline. */
		new_ka.ka_restorer = NULL;
		siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }

	return ret;
}
+
/* rt_sigaction(2) compat entry.  Converts the 32-bit sigaction (split
 * sigset words, 32-bit handler/restorer pointers) to the native
 * layout, calls do_sigaction(), and converts the old action back.  The
 * extra 'restorer' argument is the sparc signal-return trampoline.
 */
asmlinkage long compat_sys_rt_sigaction(int sig,
					struct sigaction32 __user *act,
					struct sigaction32 __user *oact,
					void __user *restorer,
					compat_size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
	compat_sigset_t set32;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

	/* All tasks which use RT signals (effectively) use
	 * new style signals.
	 */
	set_thread_flag(TIF_NEWSIGNALS);

        if (act) {
		u32 u_handler, u_restorer;

		new_ka.ka_restorer = restorer;
		ret = get_user(u_handler, &act->sa_handler);
		new_ka.sa.sa_handler =  compat_ptr(u_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
		/* Two compat words per native sigset word; intentional
		 * fallthrough converts exactly _NSIG_WORDS words.
		 */
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		ret |= __get_user(u_restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(u_restorer);
                if (ret)
                	return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		switch (_NSIG_WORDS) {
		case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
		case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
		case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
		case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
		if (ret)
			ret = -EFAULT;
        }

        return ret;
}
+
+/*
+ * sparc32_execve() executes a new program after the asm stub has set
+ * things up for us.  This should basically do what I want it to.
+ */
+asmlinkage long sparc32_execve(struct pt_regs *regs)
+{
+	int error, base = 0;
+	char *filename;
+
+	/* User register window flush is done by entry.S */
+
+	/* Check for indirect call. */
+	if ((u32)regs->u_regs[UREG_G1] == 0)
+		base = 1;
+
+	filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+
+	error = compat_do_execve(filename,
+				 compat_ptr(regs->u_regs[base + UREG_I1]),
+				 compat_ptr(regs->u_regs[base + UREG_I2]), regs);
+
+	putname(filename);
+
+	if (!error) {
+		fprs_write(0);
+		current_thread_info()->xfsr[0] = 0;
+		current_thread_info()->fpsaved[0] = 0;
+		regs->tstate &= ~TSTATE_PEF;
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+	}
+out:
+	return error;
+}
+
+#ifdef CONFIG_MODULES
+
+asmlinkage long sys32_init_module(void __user *umod, u32 len,
+				  const char __user *uargs)
+{
+	return sys_init_module(umod, len, uargs);
+}
+
+asmlinkage long sys32_delete_module(const char __user *name_user,
+				    unsigned int flags)
+{
+	return sys_delete_module(name_user, flags);
+}
+
+#else /* CONFIG_MODULES */
+
/* CONFIG_MODULES=n stub.
 * NOTE(review): this prototype differs from the CONFIG_MODULES=y
 * variant above (old-style arguments); harmless since asmlinkage
 * arguments come from registers and the body ignores them — confirm.
 */
asmlinkage long sys32_init_module(const char __user *name_user,
				  struct module __user *mod_user)
{
	return -ENOSYS;
}
+
/* CONFIG_MODULES=n stub.
 * NOTE(review): drops the 'flags' argument present in the
 * CONFIG_MODULES=y variant; the body ignores all arguments — confirm.
 */
asmlinkage long sys32_delete_module(const char __user *name_user)
{
	return -ENOSYS;
}
+
+#endif  /* CONFIG_MODULES */
+
+/* Translations due to time_t size differences.  Which affects all
+   sorts of things, like timeval and itimerval.  */
+
+extern struct timezone sys_tz;
+
/* gettimeofday(2) compat entry: fetch the time natively and store it
 * in the 32-bit timeval layout; the timezone struct is layout-equal
 * and copied directly.
 */
asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
				   struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}
+
/* Read a 32-bit timeval from userspace and convert it to a native
 * timespec (microseconds -> nanoseconds).  Returns -EFAULT on fault.
 */
static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
{
	long usec;

	if (!access_ok(VERIFY_READ, i, sizeof(*i)))
		return -EFAULT;
	if (__get_user(o->tv_sec, &i->tv_sec))
		return -EFAULT;
	if (__get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}
+
/* settimeofday(2) compat entry: convert the 32-bit timeval to a native
 * timespec and the (layout-equal) timezone, then apply both via
 * do_sys_settimeofday().
 */
asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
				   struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

 	if (tv) {
		if (get_ts32(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
+
/* utimes(2) compat entry: convert the optional pair of 32-bit timevals
 * (access, modification) and hand off to do_utimes().  A NULL tvs
 * means "set to current time" and is passed through as NULL.
 */
asmlinkage long sys32_utimes(char __user *filename,
			     struct compat_timeval __user *tvs)
{
	struct timeval ktvs[2];

	if (tvs) {
		if (get_tv32(&ktvs[0], tvs) ||
		    get_tv32(&ktvs[1], 1+tvs))
			return -EFAULT;
	}

	return do_utimes(filename, (tvs ? &ktvs[0] : NULL));
}
+
/* These are here just in case some old sparc32 binary calls it. */
asmlinkage long sys32_pause(void)
{
	/* Sleep until a signal arrives; the syscall is then restarted
	 * handling machinery sees -ERESTARTNOHAND and delivers -EINTR.
	 */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
+
+asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
+					char __user *ubuf,
+					compat_size_t count,
+					unsigned long poshi,
+					unsigned long poslo)
+{
+	return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
+}
+
+asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
+					 char __user *ubuf,
+					 compat_size_t count,
+					 unsigned long poshi,
+					 unsigned long poslo)
+{
+	return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
+}
+
+asmlinkage long compat_sys_readahead(int fd,
+				     unsigned long offhi,
+				     unsigned long offlo,
+				     compat_size_t count)
+{
+	return sys_readahead(fd, (offhi << 32) | offlo, count);
+}
+
+long compat_sys_fadvise64(int fd,
+			  unsigned long offhi,
+			  unsigned long offlo,
+			  compat_size_t len, int advice)
+{
+	return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
+}
+
+long compat_sys_fadvise64_64(int fd,
+			     unsigned long offhi, unsigned long offlo,
+			     unsigned long lenhi, unsigned long lenlo,
+			     int advice)
+{
+	return sys_fadvise64_64(fd,
+				(offhi << 32) | offlo,
+				(lenhi << 32) | lenlo,
+				advice);
+}
+
/* sendfile(2) compat entry: stage the 32-bit offset in a kernel off_t,
 * run the native syscall under KERNEL_DS, and copy the (possibly
 * updated) offset back out.
 */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
				    compat_off_t __user *offset,
				    compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __user *) &of : NULL,
			   count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
+
/* sendfile64(2) compat entry: same staging dance as compat_sys_sendfile
 * above, but with a 64-bit loff_t offset.
 */
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
				      compat_loff_t __user *offset,
				      compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
+
+/* Handle adjtimex compatibility. */
+
/* 32-bit layout of struct timex for adjtimex(2) compat; the anonymous
 * :32 bitfields are reserved padding matching the 32-bit ABI.
 */
struct timex32 {
	u32 modes;
	s32 offset, freq, maxerror, esterror;
	s32 status, constant, precision, tolerance;
	struct compat_timeval time;
	s32 tick;
	s32 ppsfreq, jitter, shift, stabil;
	s32 jitcnt, calcnt, errcnt, stbcnt;
	s32  :32; s32  :32; s32  :32; s32  :32;
	s32  :32; s32  :32; s32  :32; s32  :32;
	s32  :32; s32  :32; s32  :32; s32  :32;
};
+
+extern int do_adjtimex(struct timex *);
+
+/* 32-bit adjtimex: copy each timex32 field into a zeroed native struct
+ * timex, run do_adjtimex(), then copy every field back (adjtimex
+ * returns kernel state even for read-only queries).  The leading
+ * get_user()/put_user() performs the access_ok() check that lets the
+ * rest of the chain use the unchecked __get_user()/__put_user().
+ */
+asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
+{
+	struct timex txc;
+	int ret;
+
+	memset(&txc, 0, sizeof(struct timex));
+
+	if (get_user(txc.modes, &utp->modes) ||
+	    __get_user(txc.offset, &utp->offset) ||
+	    __get_user(txc.freq, &utp->freq) ||
+	    __get_user(txc.maxerror, &utp->maxerror) ||
+	    __get_user(txc.esterror, &utp->esterror) ||
+	    __get_user(txc.status, &utp->status) ||
+	    __get_user(txc.constant, &utp->constant) ||
+	    __get_user(txc.precision, &utp->precision) ||
+	    __get_user(txc.tolerance, &utp->tolerance) ||
+	    __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	    __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	    __get_user(txc.tick, &utp->tick) ||
+	    __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+	    __get_user(txc.jitter, &utp->jitter) ||
+	    __get_user(txc.shift, &utp->shift) ||
+	    __get_user(txc.stabil, &utp->stabil) ||
+	    __get_user(txc.jitcnt, &utp->jitcnt) ||
+	    __get_user(txc.calcnt, &utp->calcnt) ||
+	    __get_user(txc.errcnt, &utp->errcnt) ||
+	    __get_user(txc.stbcnt, &utp->stbcnt))
+		return -EFAULT;
+
+	ret = do_adjtimex(&txc);
+
+	/* Copy results back; an EFAULT here overrides the adjtimex
+	 * return value.
+	 */
+	if (put_user(txc.modes, &utp->modes) ||
+	    __put_user(txc.offset, &utp->offset) ||
+	    __put_user(txc.freq, &utp->freq) ||
+	    __put_user(txc.maxerror, &utp->maxerror) ||
+	    __put_user(txc.esterror, &utp->esterror) ||
+	    __put_user(txc.status, &utp->status) ||
+	    __put_user(txc.constant, &utp->constant) ||
+	    __put_user(txc.precision, &utp->precision) ||
+	    __put_user(txc.tolerance, &utp->tolerance) ||
+	    __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	    __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	    __put_user(txc.tick, &utp->tick) ||
+	    __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+	    __put_user(txc.jitter, &utp->jitter) ||
+	    __put_user(txc.shift, &utp->shift) ||
+	    __put_user(txc.stabil, &utp->stabil) ||
+	    __put_user(txc.jitcnt, &utp->jitcnt) ||
+	    __put_user(txc.calcnt, &utp->calcnt) ||
+	    __put_user(txc.errcnt, &utp->errcnt) ||
+	    __put_user(txc.stbcnt, &utp->stbcnt))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+/* This is just a version for 32-bit applications which does
+ * not force O_LARGEFILE on.
+ *
+ * Open-codes the fd allocation so the flags reach filp_open()
+ * unmodified.  On filp_open() failure the reserved fd is released and
+ * its error code is returned instead.
+ */
+asmlinkage long sparc32_open(const char __user *filename,
+			     int flags, int mode)
+{
+	char * tmp;
+	int fd, error;
+
+	tmp = getname(filename);
+	fd = PTR_ERR(tmp);
+	if (!IS_ERR(tmp)) {
+		fd = get_unused_fd();
+		if (fd >= 0) {
+			struct file * f = filp_open(tmp, flags, mode);
+			error = PTR_ERR(f);
+			if (IS_ERR(f))
+				goto out_error;
+			fd_install(fd, f);
+		}
+out:
+		putname(tmp);
+	}
+	return fd;
+
+out_error:
+	/* filp_open() failed: give the fd slot back and report the error. */
+	put_unused_fd(fd);
+	fd = error;
+	goto out;
+}
+
+extern unsigned long do_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr);
+                
+/* 32-bit mremap: enforce the 0xf0000000 32-bit task address-space
+ * ceiling on both the old and new ranges.  If growing in place would
+ * cross the ceiling and MREMAP_MAYMOVE is set, pick a fresh area below
+ * it with get_unmapped_area() and force MREMAP_FIXED so do_mremap()
+ * uses exactly that address.
+ */
+asmlinkage unsigned long sys32_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, u32 __new_addr)
+{
+	struct vm_area_struct *vma;
+	unsigned long ret = -EINVAL;
+	unsigned long new_addr = __new_addr;
+
+	if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
+		goto out;
+	if (addr > 0xf0000000UL - old_len)
+		goto out;
+	down_write(&current->mm->mmap_sem);
+	if (flags & MREMAP_FIXED) {
+		if (new_addr > 0xf0000000UL - new_len)
+			goto out_sem;
+	} else if (addr > 0xf0000000UL - new_len) {
+		unsigned long map_flags = 0;
+		struct file *file = NULL;
+
+		ret = -ENOMEM;
+		if (!(flags & MREMAP_MAYMOVE))
+			goto out_sem;
+
+		/* Mirror the mapping's sharedness/backing file so the
+		 * replacement area is chosen with the same constraints.
+		 */
+		vma = find_vma(current->mm, addr);
+		if (vma) {
+			if (vma->vm_flags & VM_SHARED)
+				map_flags |= MAP_SHARED;
+			file = vma->vm_file;
+		}
+
+		/* MREMAP_FIXED checked above. */
+		new_addr = get_unmapped_area(file, addr, new_len,
+				    vma ? vma->vm_pgoff : 0,
+				    map_flags);
+		ret = new_addr;
+		if (new_addr & ~PAGE_MASK)
+			goto out_sem;
+		flags |= MREMAP_FIXED;
+	}
+	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+out_sem:
+	up_write(&current->mm->mmap_sem);
+out:
+	return ret;       
+}
+
+/* 32-bit layout of struct __sysctl_args; the pointer members are
+ * carried as u32 and widened before use.
+ */
+struct __sysctl_args32 {
+	u32 name;
+	int nlen;
+	u32 oldval;
+	u32 oldlenp;
+	u32 newval;
+	u32 newlen;
+	u32 __unused[4];
+};
+
+/* 32-bit _sysctl: do_sysctl() wants a full-width size_t oldlen in user
+ * memory, but the 32-bit caller only supplies a u32.  The 8-byte
+ * aligned start of args->__unused is borrowed as user-space scratch for
+ * a temporary size_t, and the original __unused contents are restored
+ * afterwards.
+ */
+asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
+{
+#ifndef CONFIG_SYSCTL
+	return -ENOSYS;
+#else
+	struct __sysctl_args32 tmp;
+	int error;
+	size_t oldlen, __user *oldlenp = NULL;
+	unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;
+
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.oldval && tmp.oldlenp) {
+		/* Duh, this is ugly and might not work if sysctl_args
+		   is in read-only memory, but do_sysctl does indirectly
+		   a lot of uaccess in both directions and we'd have to
+		   basically copy the whole sysctl.c here, and
+		   glibc's __sysctl uses rw memory for the structure
+		   anyway.  */
+		if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
+		    put_user(oldlen, (size_t __user *)addr))
+			return -EFAULT;
+		oldlenp = (size_t __user *)addr;
+	}
+
+	lock_kernel();
+	error = do_sysctl((int __user *)(unsigned long) tmp.name,
+			  tmp.nlen,
+			  (void __user *)(unsigned long) tmp.oldval,
+			  oldlenp,
+			  (void __user *)(unsigned long) tmp.newval,
+			  tmp.newlen);
+	unlock_kernel();
+	if (oldlenp) {
+		if (!error) {
+			/* Narrow the size_t result back into the caller's
+			 * 32-bit oldlenp slot.
+			 */
+			if (get_user(oldlen, (size_t __user *)addr) ||
+			    put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
+				error = -EFAULT;
+		}
+		/* Undo the scribbling over __unused. */
+		if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
+			error = -EFAULT;
+	}
+	return error;
+#endif
+}
+
+/* 32-bit lookup_dcookie: the 64-bit cookie arrives as two 32-bit
+ * register halves; reassemble it before calling the native syscall.
+ */
+long sys32_lookup_dcookie(unsigned long cookie_high,
+			  unsigned long cookie_low,
+			  char __user *buf, size_t len)
+{
+	u64 cookie = (cookie_high << 32) | cookie_low;
+
+	return sys_lookup_dcookie(cookie, buf, len);
+}
+
+extern asmlinkage long
+sys_timer_create(clockid_t which_clock,
+		 struct sigevent __user *timer_event_spec,
+		 timer_t __user *created_timer_id);
+
+/* 32-bit timer_create: translate the compat sigevent into a native one
+ * on the kernel stack and run the native syscall under KERNEL_DS.  The
+ * timer id is also staged in a kernel-stack temporary and copied out
+ * with __put_user() after the explicit access_ok() check below.
+ */
+long
+sys32_timer_create(u32 clock, struct compat_sigevent __user *se32,
+		   timer_t __user *timer_id)
+{
+	struct sigevent se;
+	mm_segment_t oldfs;
+	timer_t t;
+	long err;
+
+	/* No sigevent to translate: the native path handles it directly. */
+	if (se32 == NULL)
+		return sys_timer_create(clock, NULL, timer_id);
+
+	if (get_compat_sigevent(&se, se32))
+		return -EFAULT;
+
+	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
+		return -EFAULT;
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_timer_create(clock,
+			       (struct sigevent __user *) &se,
+			       (timer_t __user *) &t);
+	set_fs(oldfs);
+
+	if (!err)
+		err = __put_user (t, timer_id);
+
+	return err;
+}
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
new file mode 100644
index 0000000..d0592ed
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -0,0 +1,1343 @@
+/* $Id: sys_sunos32.c,v 1.64 2002/02/09 19:49:31 davem Exp $
+ * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
+ *
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/resource.h>
+#include <linux/ipc.h>
+#include <linux/shm.h>
+#include <linux/msg.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/uio.h>
+#include <linux/utsname.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pconf.h>
+#include <asm/idprom.h> /* for gethostid() */
+#include <asm/unistd.h>
+#include <asm/system.h>
+
+/* For the nfs mount emulation */
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_mount.h>
+
+/* for sunos_select */
+#include <linux/time.h>
+#include <linux/personality.h>
+
+/* For SOCKET_I */
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/compat.h>
+
+#define SUNOS_NR_OPEN	256
+
+/* SunOS mmap() emulation.  Differences from Linux mmap handled here:
+ * mappings of the MEM_MAJOR minor-5 device are turned into anonymous
+ * mappings, the SunOS _MAP_NEW bit selects new-style return semantics,
+ * and non-MAP_FIXED requests ignore the address hint entirely.
+ */
+asmlinkage u32 sunos_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
+{
+	struct file *file = NULL;
+	unsigned long retval, ret_type;
+
+	if (flags & MAP_NORESERVE) {
+		static int cnt;
+		if (cnt++ < 10)
+			printk("%s:  unimplemented SunOS MAP_NORESERVE mmap() flag\n",
+			       current->comm);
+		flags &= ~MAP_NORESERVE;
+	}
+	retval = -EBADF;
+	if (!(flags & MAP_ANONYMOUS)) {
+		struct inode * inode;
+		if (fd >= SUNOS_NR_OPEN)
+			goto out;
+ 		file = fget(fd);
+		if (!file)
+			goto out;
+		inode = file->f_dentry->d_inode;
+		/* MEM_MAJOR minor 5: treat the mapping as anonymous. */
+		if (imajor(inode) == MEM_MAJOR && iminor(inode) == 5) {
+			flags |= MAP_ANONYMOUS;
+			fput(file);
+			file = NULL;
+		}
+	}
+
+	retval = -EINVAL;
+	if (!(flags & MAP_FIXED))
+		addr = 0;
+	else if (len > 0xf0000000 || addr > 0xf0000000 - len)
+		goto out_putf;
+	ret_type = flags & _MAP_NEW;
+	flags &= ~_MAP_NEW;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(file,
+			 (unsigned long) addr, (unsigned long) len,
+			 (unsigned long) prot, (unsigned long) flags,
+			 (unsigned long) off);
+	up_write(&current->mm->mmap_sem);
+	/* Old-style (no _MAP_NEW) callers get 0 on success instead of
+	 * the mapped address.
+	 */
+	if (!ret_type)
+		retval = ((retval < 0xf0000000) ? 0 : retval);
+out_putf:
+	if (file)
+		fput(file);
+out:
+	return (u32) retval;
+}
+
+/* SunOS mctl(): accepted but deliberately a no-op; always succeeds. */
+asmlinkage int sunos_mctl(u32 addr, u32 len, int function, u32 arg)
+{
+	return 0;
+}
+
+/* SunOS brk(): like sys_brk() but with SunOS error semantics and its
+ * own heuristic free-memory check before growing the heap.
+ */
+asmlinkage int sunos_brk(u32 baddr)
+{
+	int freepages, retval = -ENOMEM;
+	unsigned long rlim;
+	unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;
+
+	down_write(&current->mm->mmap_sem);
+	if (brk < current->mm->end_code)
+		goto out;
+	newbrk = PAGE_ALIGN(brk);
+	oldbrk = PAGE_ALIGN(current->mm->brk);
+	retval = 0;
+	if (oldbrk == newbrk) {
+		current->mm->brk = brk;
+		goto out;
+	}
+	/* Always allow shrinking brk. */
+	if (brk <= current->mm->brk) {
+		current->mm->brk = brk;
+		do_munmap(current->mm, newbrk, oldbrk-newbrk);
+		goto out;
+	}
+	/* Check against rlimit and stack.. */
+	retval = -ENOMEM;
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim >= RLIM_INFINITY)
+		rlim = ~0;
+	if (brk - current->mm->end_code > rlim)
+		goto out;
+	/* Check against existing mmap mappings. */
+	if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
+		goto out;
+	/* stupid algorithm to decide if we have enough memory: while
+	 * simple, it hopefully works in most obvious cases.. Easy to
+	 * fool it, but this should catch most mistakes.
+	 */
+	freepages = get_page_cache_size();
+	freepages >>= 1;
+	freepages += nr_free_pages();
+	freepages += nr_swap_pages;
+	freepages -= num_physpages >> 4;
+	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
+	if (freepages < 0)
+		goto out;
+	/* Ok, we have probably got enough memory - let it rip. */
+	current->mm->brk = brk;
+	do_brk(oldbrk, newbrk-oldbrk);
+	retval = 0;
+out:
+	up_write(&current->mm->mmap_sem);
+	return retval;
+}
+
+/* SunOS sbrk(): grow/shrink the heap by 'increment' and return the old
+ * break.  NOTE(review): the break is deliberately truncated through
+ * int, which is only safe because SunOS tasks live below 4GB.
+ */
+asmlinkage u32 sunos_sbrk(int increment)
+{
+	int error, oldbrk;
+
+	/* This should do it hopefully... */
+	oldbrk = (int)current->mm->brk;
+	error = sunos_brk(((int) current->mm->brk) + increment);
+	if (!error)
+		error = oldbrk;
+	return error;
+}
+
+/* SunOS sstk() (stack extension) is not implemented; log the attempt
+ * and fail with the SunOS-style -1 return.
+ */
+asmlinkage u32 sunos_sstk(int increment)
+{
+	printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
+	       current->comm, increment);
+
+	return (u32)-1;
+}
+
+/* Give hints to the kernel as to what paging strategy to use...
+ * Completely bogus, don't remind me.
+ */
+#define VA_NORMAL     0 /* Normal vm usage expected */
+#define VA_ABNORMAL   1 /* Abnormal/random vm usage probable */
+#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
+#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
+/* Printable names for the VA_* strategies, indexed by value. */
+static char *vstrings[] = {
+	"VA_NORMAL",
+	"VA_ABNORMAL",
+	"VA_SEQUENTIAL",
+	"VA_INVALIDATE",
+};
+
+/* SunOS vadvise(): ignored; just log the first few callers and the
+ * strategy they asked for.
+ */
+asmlinkage void sunos_vadvise(u32 strategy)
+{
+	static int count;
+
+	/* I wanna see who uses this... */
+	if (count++ < 5)
+		printk("%s: Advises us to use %s paging strategy\n",
+		       current->comm,
+		       strategy <= 3 ? vstrings[strategy] : "BOGUS");
+}
+
+/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
+ * resource limit and is for backwards compatibility with older sunos
+ * revs.
+ *
+ * Returns the fixed SunOS fd-table size rather than the real rlimit.
+ */
+asmlinkage int sunos_getdtablesize(void)
+{
+	return SUNOS_NR_OPEN;
+}
+
+
+/* All signals except SIGKILL and SIGSTOP, which may never be blocked. */
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/* SunOS sigblock(): OR the given mask into the blocked set (minus the
+ * unblockable signals) and return the previous 32-bit mask.
+ */
+asmlinkage u32 sunos_sigblock(u32 blk_mask)
+{
+	u32 old;
+
+	spin_lock_irq(&current->sighand->siglock);
+	old = (u32) current->blocked.sig[0];
+	current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return old;
+}
+
+/* SunOS sigsetmask(): replace (not OR) the blocked set with the given
+ * mask, again excluding SIGKILL/SIGSTOP; returns the previous mask.
+ */
+asmlinkage u32 sunos_sigsetmask(u32 newmask)
+{
+	u32 retval;
+
+	spin_lock_irq(&current->sighand->siglock);
+	retval = (u32) current->blocked.sig[0];
+	current->blocked.sig[0] = (newmask & _BLOCKABLE);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return retval;
+}
+
+/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant)    */
+/* getdents system call, the format of the structure just has a different */
+/* layout (d_off+d_ino instead of d_ino+d_off) */
+struct sunos_dirent {
+    s32		d_off;
+    u32		d_ino;
+    u16		d_reclen;
+    u16		d_namlen;
+    char	d_name[1];
+};
+
+/* Cursor state threaded through sunos_filldir() by vfs_readdir(). */
+struct sunos_dirent_callback {
+    struct sunos_dirent __user *curr;
+    struct sunos_dirent __user *previous;
+    int count;
+    int error;
+};
+
+/* Byte offset of d_name within a dirent record. */
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+/* Round a record length up to s32 alignment. */
+#define ROUND_UP(x) (((x)+sizeof(s32)-1) & ~(sizeof(s32)-1))
+
+/* vfs_readdir() callback: append one sunos_dirent record to the user
+ * buffer described by __buf.  The previous record's d_off is patched to
+ * the current offset once the next entry arrives.  NOTE(review): most
+ * put_user() results here are deliberately unchecked, matching the
+ * native getdents filldir of this era.
+ */
+static int sunos_filldir(void * __buf, const char * name, int namlen,
+			 loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct sunos_dirent __user *dirent;
+	struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
+	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	if (dirent)
+		put_user(offset, &dirent->d_off);
+	dirent = buf->curr;
+	buf->previous = dirent;
+	put_user(ino, &dirent->d_ino);
+	put_user(namlen, &dirent->d_namlen);
+	put_user(reclen, &dirent->d_reclen);
+	if (copy_to_user(dirent->d_name, name, namlen))
+		return -EFAULT;
+	put_user(0, dirent->d_name + namlen);
+	dirent = (void __user *) dirent + reclen;
+	buf->curr = dirent;
+	buf->count -= reclen;
+	return 0;
+}
+
+/* SunOS getdents(): fill 'dirent' with up to 'cnt' bytes of
+ * sunos_dirent records.  Returns bytes written, 0 at EOF, or a
+ * negative errno.
+ */
+asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
+{
+	struct file * file;
+	struct sunos_dirent __user *lastdirent;
+	struct sunos_dirent_callback buf;
+	int error = -EBADF;
+
+	if (fd >= SUNOS_NR_OPEN)
+		goto out;
+
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	/* Require room for at least one maximal-name entry. */
+	error = -EINVAL;
+	if (cnt < (sizeof(struct sunos_dirent) + 255))
+		goto out_putf;
+
+	buf.curr = (struct sunos_dirent __user *) dirent;
+	buf.previous = NULL;
+	buf.count = cnt;
+	buf.error = 0;
+
+	error = vfs_readdir(file, sunos_filldir, &buf);
+	if (error < 0)
+		goto out_putf;
+
+	lastdirent = buf.previous;
+	error = buf.error;
+	if (lastdirent) {
+		/* Terminate the last record's d_off with the final file
+		 * position and report bytes consumed.
+		 */
+		put_user(file->f_pos, &lastdirent->d_off);
+		error = cnt - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+/* Old sunos getdirentries, severely broken compatibility stuff here. */
+/* Like sunos_dirent but without the leading d_off field. */
+struct sunos_direntry {
+    u32		d_ino;
+    u16		d_reclen;
+    u16		d_namlen;
+    char	d_name[1];
+};
+
+/* Cursor state threaded through sunos_filldirentry(). */
+struct sunos_direntry_callback {
+    struct sunos_direntry __user *curr;
+    struct sunos_direntry __user *previous;
+    int count;
+    int error;
+};
+
+/* vfs_readdir() callback for getdirentries: same shape as
+ * sunos_filldir() but the record has no d_off field.  NOTE(review):
+ * the `dirent = buf->previous;` below is a dead store (immediately
+ * overwritten), a leftover from sunos_filldir() where previous is used
+ * to back-patch d_off.
+ */
+static int sunos_filldirentry(void * __buf, const char * name, int namlen,
+			      loff_t offset, ino_t ino, unsigned int d_type)
+{
+	struct sunos_direntry __user *dirent;
+	struct sunos_direntry_callback * buf =
+		(struct sunos_direntry_callback *) __buf;
+	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	dirent = buf->curr;
+	buf->previous = dirent;
+	put_user(ino, &dirent->d_ino);
+	put_user(namlen, &dirent->d_namlen);
+	put_user(reclen, &dirent->d_reclen);
+	if (copy_to_user(dirent->d_name, name, namlen))
+		return -EFAULT;
+	put_user(0, dirent->d_name + namlen);
+	dirent = (void __user *) dirent + reclen;
+	buf->curr = dirent;
+	buf->count -= reclen;
+	return 0;
+}
+
+/* SunOS getdirentries(): like sunos_getdents() but uses the d_off-less
+ * record format and stores the final file position through 'basep'.
+ */
+asmlinkage int sunos_getdirentries(unsigned int fd,
+				   void __user *dirent,
+				   int cnt,
+				   unsigned int __user *basep)
+{
+	struct file * file;
+	struct sunos_direntry __user *lastdirent;
+	int error = -EBADF;
+	struct sunos_direntry_callback buf;
+
+	if (fd >= SUNOS_NR_OPEN)
+		goto out;
+
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	/* Require room for at least one maximal-name entry. */
+	error = -EINVAL;
+	if (cnt < (sizeof(struct sunos_direntry) + 255))
+		goto out_putf;
+
+	buf.curr = (struct sunos_direntry __user *) dirent;
+	buf.previous = NULL;
+	buf.count = cnt;
+	buf.error = 0;
+
+	error = vfs_readdir(file, sunos_filldirentry, &buf);
+	if (error < 0)
+		goto out_putf;
+
+	lastdirent = buf.previous;
+	error = buf.error;
+	if (lastdirent) {
+		put_user(file->f_pos, basep);
+		error = cnt - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+/* SunOS utsname layout: fixed 9-byte (8 chars + NUL) fields plus the
+ * nnext spill area after the node name.
+ */
+struct sunos_utsname {
+	char sname[9];
+	char nname[9];
+	char nnext[56];
+	char rel[9];
+	char ver[9];
+	char mach[9];
+};
+
+/* SunOS uname(): copy truncated (8-byte) fields out of system_utsname
+ * under uts_sem.  NOTE(review): only nname is explicitly
+ * NUL-terminated here; the other fields appear to rely on the source
+ * strings being short or the user buffer being pre-zeroed — verify
+ * against SunOS callers.
+ */
+asmlinkage int sunos_uname(struct sunos_utsname __user *name)
+{
+	int ret;
+
+	down_read(&uts_sem);
+	ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0],
+			   sizeof(name->sname) - 1);
+	ret |= copy_to_user(&name->nname[0], &system_utsname.nodename[0],
+			    sizeof(name->nname) - 1);
+	ret |= put_user('\0', &name->nname[8]);
+	ret |= copy_to_user(&name->rel[0], &system_utsname.release[0],
+			    sizeof(name->rel) - 1);
+	ret |= copy_to_user(&name->ver[0], &system_utsname.version[0],
+			    sizeof(name->ver) - 1);
+	ret |= copy_to_user(&name->mach[0], &system_utsname.machine[0],
+			    sizeof(name->mach) - 1);
+	up_read(&uts_sem);
+	return (ret ? -EFAULT : 0);
+}
+
+/* Handler for unimplemented SunOS syscalls: raise SIGSYS at the
+ * faulting PC (with the syscall number in si_trapno), log the first
+ * few occurrences with a register dump, and return -ENOSYS.
+ */
+asmlinkage int sunos_nosys(void)
+{
+	struct pt_regs *regs;
+	siginfo_t info;
+	static int cnt;
+
+	regs = current_thread_info()->kregs;
+	/* 32-bit tasks only see the low 32 bits of the PC/nPC. */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGSYS;
+	info.si_errno = 0;
+	info.si_code = __SI_FAULT|0x100;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = regs->u_regs[UREG_G1];
+	send_sig_info(SIGSYS, &info, current);
+	if (cnt++ < 4) {
+		printk("Process makes ni_syscall number %d, register dump:\n",
+		       (int) regs->u_regs[UREG_G1]);
+		show_regs(regs);
+	}
+	return -ENOSYS;
+}
+
+/* This is not a real and complete implementation yet, just to keep
+ * the easy SunOS binaries happy.
+ *
+ * Maps SunOS _PCONF_* queries onto the kernel's compile-time limits;
+ * the fd argument is currently ignored.
+ */
+asmlinkage int sunos_fpathconf(int fd, int name)
+{
+	int ret;
+
+	switch(name) {
+	case _PCONF_LINK:
+		ret = LINK_MAX;
+		break;
+	case _PCONF_CANON:
+		ret = MAX_CANON;
+		break;
+	case _PCONF_INPUT:
+		ret = MAX_INPUT;
+		break;
+	case _PCONF_NAME:
+		ret = NAME_MAX;
+		break;
+	case _PCONF_PATH:
+		ret = PATH_MAX;
+		break;
+	case _PCONF_PIPE:
+		ret = PIPE_BUF;
+		break;
+	case _PCONF_CHRESTRICT:		/* XXX Investigate XXX */
+		ret = 1;
+		break;
+	case _PCONF_NOTRUNC:		/* XXX Investigate XXX */
+	case _PCONF_VDISABLE:
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/* SunOS pathconf(): the path is ignored entirely and the query is
+ * answered via sunos_fpathconf(), whose results are path-independent
+ * anyway.
+ */
+asmlinkage int sunos_pathconf(u32 u_path, int name)
+{
+	int ret;
+
+	ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
+	return ret;
+}
+
+/* SunOS select(): wraps compat_sys_select() but, when interrupted with
+ * a fully elapsed (zeroed) timeout, reports 0 instead of -EINTR as
+ * SunOS binaries expect.
+ */
+asmlinkage int sunos_select(int width, u32 inp, u32 outp, u32 exp, u32 tvp_x)
+{
+	int ret;
+
+	/* SunOS binaries expect that select won't change the tvp contents */
+	ret = compat_sys_select(width, compat_ptr(inp), compat_ptr(outp),
+				compat_ptr(exp), compat_ptr(tvp_x));
+	if (ret == -EINTR && tvp_x) {
+		struct compat_timeval __user *tvp = compat_ptr(tvp_x);
+		time_t sec, usec;
+
+		__get_user(sec, &tvp->tv_sec);
+		__get_user(usec, &tvp->tv_usec);
+		if (sec == 0 && usec == 0)
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Deliberate no-op syscall stub. */
+asmlinkage void sunos_nop(void)
+{
+	return;
+}
+
+#if 0 /* This code doesn't translate user pointers correctly,
+       * disable for now. -DaveM
+       */
+
+/* XXXXXXXXXX SunOS mount/umount. XXXXXXXXXXX */
+/* SunOS mount(2) flag bits. */
+#define SMNT_RDONLY       1
+#define SMNT_NOSUID       2
+#define SMNT_NEWTYPE      4
+#define SMNT_GRPID        8
+#define SMNT_REMOUNT      16
+#define SMNT_NOSUB        32
+#define SMNT_MULTI        64
+#define SMNT_SYS5         128
+
+struct sunos_fh_t {
+	char fh_data [NFS_FHSIZE];
+};
+
+/* SunOS layout of the nfs mount data argument; note the embedded
+ * user-space pointers are the reason this block is disabled.
+ */
+struct sunos_nfs_mount_args {
+	struct sockaddr_in  *addr; /* file server address */
+	struct nfs_fh *fh;     /* File handle to be mounted */
+	int        flags;      /* flags */
+	int        wsize;      /* write size in bytes */
+	int        rsize;      /* read size in bytes */
+	int        timeo;      /* initial timeout in .1 secs */
+	int        retrans;    /* times to retry send */
+	char       *hostname;  /* server's hostname */
+	int        acregmin;   /* attr cache file min secs */
+	int        acregmax;   /* attr cache file max secs */
+	int        acdirmin;   /* attr cache dir min secs */
+	int        acdirmax;   /* attr cache dir max secs */
+	char       *netname;   /* server's netname */
+};
+
+
+/* Bind the socket on a local reserved port and connect it to the
+ * remote server.  This on Linux/i386 is done by the mount program,
+ * not by the kernel. 
+ *
+ * Returns 1 on success, 0 on any failure (disabled code; see #if 0).
+ */
+/* XXXXXXXXXXXXXXXXXXXX */
+static int
+sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
+{
+	struct sockaddr_in local;
+	struct sockaddr_in server;
+	int    try_port;
+	int    ret;
+	struct socket *socket;
+	struct inode  *inode;
+	struct file   *file;
+
+	file = fget(fd);
+	if (!file)
+		return 0;
+
+	inode = file->f_dentry->d_inode;
+
+	socket = SOCKET_I(inode);
+	local.sin_family = AF_INET;
+	local.sin_addr.s_addr = INADDR_ANY;
+
+	/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
+	try_port = 1024;
+	do {
+		local.sin_port = htons (--try_port);
+		ret = socket->ops->bind(socket, (struct sockaddr*)&local,
+					sizeof(local));
+	} while (ret && try_port > (1024 / 2));
+
+	if (ret) {
+		fput(file);
+		return 0;
+	}
+
+	server.sin_family = AF_INET;
+	server.sin_addr = addr->sin_addr;
+	server.sin_port = NFS_PORT;
+
+	/* Call sys_connect */
+	ret = socket->ops->connect (socket, (struct sockaddr *) &server,
+				    sizeof (server), file->f_flags);
+	fput(file);
+	if (ret < 0)
+		return 0;
+	return 1;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+/* Return 'value' if non-zero, otherwise the supplied default. */
+static int get_default (int value, int def_value)
+{
+    if (value)
+	return value;
+    else
+	return def_value;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+/* Translate a SunOS nfs mount request into Linux nfs_mount_data and
+ * perform the mount.  (Disabled code; user pointers in sunos_mount
+ * args are dereferenced without proper translation.)
+ */
+static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
+{
+	int  server_fd, err;
+	char *the_name, *mount_page;
+	struct nfs_mount_data linux_nfs_mount;
+	struct sunos_nfs_mount_args sunos_mount;
+
+	/* Ok, here comes the fun part: Linux's nfs mount needs a
+	 * socket connection to the server, but SunOS mount does not
+	 * require this, so we use the information on the destination
+	 * address to create a socket and bind it to a reserved
+	 * port on this system
+	 */
+	if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
+		return -EFAULT;
+
+	server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+	if (server_fd < 0)
+		return -ENXIO;
+
+	if (copy_from_user(&linux_nfs_mount.addr, sunos_mount.addr,
+			   sizeof(*sunos_mount.addr)) ||
+	    copy_from_user(&linux_nfs_mount.root, sunos_mount.fh,
+			   sizeof(*sunos_mount.fh))) {
+		sys_close (server_fd);
+		return -EFAULT;
+	}
+
+	if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
+		sys_close (server_fd);
+		return -ENXIO;
+	}
+
+	/* Now, bind it to a locally reserved port */
+	linux_nfs_mount.version  = NFS_MOUNT_VERSION;
+	linux_nfs_mount.flags    = sunos_mount.flags;
+	linux_nfs_mount.fd       = server_fd;
+	
+	linux_nfs_mount.rsize    = get_default (sunos_mount.rsize, 8192);
+	linux_nfs_mount.wsize    = get_default (sunos_mount.wsize, 8192);
+	linux_nfs_mount.timeo    = get_default (sunos_mount.timeo, 10);
+	linux_nfs_mount.retrans  = sunos_mount.retrans;
+	
+	linux_nfs_mount.acregmin = sunos_mount.acregmin;
+	linux_nfs_mount.acregmax = sunos_mount.acregmax;
+	linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
+	linux_nfs_mount.acdirmax = sunos_mount.acdirmax;
+
+	the_name = getname(sunos_mount.hostname);
+	if (IS_ERR(the_name))
+		return PTR_ERR(the_name);
+
+	strlcpy(linux_nfs_mount.hostname, the_name,
+		sizeof(linux_nfs_mount.hostname));
+	putname (the_name);
+	
+	mount_page = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!mount_page)
+		return -ENOMEM;
+
+	memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));
+
+	err = do_mount("", dir_name, "nfs", linux_flags, mount_page);
+
+	free_page((unsigned long) mount_page);
+	return err;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+/* SunOS mount(2): translate SMNT_* flags to MS_* and dispatch by
+ * filesystem-type string; nfs gets special handling, ufs is refused,
+ * anything other than the listed types (or proc) is -ENODEV.
+ * (Disabled code; see the #if 0 above.)
+ */
+asmlinkage int
+sunos_mount(char *type, char *dir, int flags, void *data)
+{
+	int linux_flags = 0;
+	int ret = -EINVAL;
+	char *dev_fname = 0;
+	char *dir_page, *type_page;
+
+	if (!capable (CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/* We don't handle the integer fs type */
+	if ((flags & SMNT_NEWTYPE) == 0)
+		goto out;
+
+	/* Do not allow for those flags we don't support */
+	if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
+		goto out;
+
+	if (flags & SMNT_REMOUNT)
+		linux_flags |= MS_REMOUNT;
+	if (flags & SMNT_RDONLY)
+		linux_flags |= MS_RDONLY;
+	if (flags & SMNT_NOSUID)
+		linux_flags |= MS_NOSUID;
+
+	dir_page = getname(dir);
+	ret = PTR_ERR(dir_page);
+	if (IS_ERR(dir_page))
+		goto out;
+
+	type_page = getname(type);
+	ret = PTR_ERR(type_page);
+	if (IS_ERR(type_page))
+		goto out1;
+
+	/* For block filesystems, 'data' carries the device filename. */
+	if (strcmp(type_page, "ext2") == 0) {
+		dev_fname = getname(data);
+	} else if (strcmp(type_page, "iso9660") == 0) {
+		dev_fname = getname(data);
+	} else if (strcmp(type_page, "minix") == 0) {
+		dev_fname = getname(data);
+	} else if (strcmp(type_page, "nfs") == 0) {
+		ret = sunos_nfs_mount (dir_page, flags, data);
+		goto out2;
+        } else if (strcmp(type_page, "ufs") == 0) {
+		printk("Warning: UFS filesystem mounts unsupported.\n");
+		ret = -ENODEV;
+		goto out2;
+	} else if (strcmp(type_page, "proc")) {
+		ret = -ENODEV;
+		goto out2;
+	}
+	ret = PTR_ERR(dev_fname);
+	if (IS_ERR(dev_fname))
+		goto out2;
+	lock_kernel();
+	ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
+	unlock_kernel();
+	if (dev_fname)
+		putname(dev_fname);
+out2:
+	putname(type_page);
+out1:
+	putname(dir_page);
+out:
+	return ret;
+}
+#endif
+
+/* SunOS setpgrp(): setpgrp(0/self, 0) means "become a session leader"
+ * (setsid); anything else behaves like setpgid().
+ */
+asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
+{
+	int ret;
+
+	/* So stupid... */
+	if ((!pid || pid == current->pid) &&
+	    !pgid) {
+		sys_setsid();
+		ret = 0;
+	} else {
+		ret = sys_setpgid(pid, pgid);
+	}
+	return ret;
+}
+
+/* So stupid... */
+extern long compat_sys_wait4(compat_pid_t, compat_uint_t __user *, int,
+			     struct compat_rusage __user *);
+
+/* SunOS wait4(): pid 0 means "any child", which Linux spells -1. */
+asmlinkage int sunos_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru)
+{
+	int ret;
+
+	ret = compat_sys_wait4((pid ? pid : ((compat_pid_t)-1)),
+			       stat_addr, options, ru);
+	return ret;
+}
+
+extern int kill_pg(int, int, int);
+/* SunOS killpg(): deliver 'sig' to every process in group 'pgrp'. */
+asmlinkage int sunos_killpg(int pgrp, int sig)
+{
+	return kill_pg(pgrp, sig, 0);
+}
+
+/* SunOS audit(): unimplemented; log and fail. */
+asmlinkage int sunos_audit(void)
+{
+	printk ("sys_audit\n");
+	return -1;
+}
+
+/* SunOS gethostid(): synthesize the 32-bit host id from the IDPROM's
+ * machine type (high byte) and serial number (low 24 bits).
+ */
+asmlinkage u32 sunos_gethostid(void)
+{
+	u32 machtype = (u32) idprom->id_machtype;
+	u32 sernum = (u32) idprom->id_sernum;
+
+	return (machtype << 24) | sernum;
+}
+
+/* sysconf options, for SunOS compatibility */
+#define   _SC_ARG_MAX             1
+#define   _SC_CHILD_MAX           2
+#define   _SC_CLK_TCK             3
+#define   _SC_NGROUPS_MAX         4
+#define   _SC_OPEN_MAX            5
+#define   _SC_JOB_CONTROL         6
+#define   _SC_SAVED_IDS           7
+#define   _SC_VERSION             8
+
+/* SunOS sysconf(): map the _SC_* queries above to kernel constants;
+ * unknown names return -1 (SunOS convention), not an errno.
+ */
+asmlinkage s32 sunos_sysconf (int name)
+{
+	s32 ret;
+
+	switch (name){
+	case _SC_ARG_MAX:
+		ret = ARG_MAX;
+		break;
+	case _SC_CHILD_MAX:
+		ret = CHILD_MAX;
+		break;
+	case _SC_CLK_TCK:
+		ret = HZ;
+		break;
+	case _SC_NGROUPS_MAX:
+		ret = NGROUPS_MAX;
+		break;
+	case _SC_OPEN_MAX:
+		ret = OPEN_MAX;
+		break;
+	case _SC_JOB_CONTROL:
+		ret = 1;	/* yes, we do support job control */
+		break;
+	case _SC_SAVED_IDS:
+		ret = 1;	/* yes, we do support saved uids  */
+		break;
+	case _SC_VERSION:
+		/* mhm, POSIX_VERSION is in /usr/include/unistd.h
+		 * should it go on /usr/include/linux?
+		 */
+		ret = 199009;
+		break;
+	default:
+		ret = -1;
+		break;
+	};
+	return ret;
+}
+
+/* SunOS semsys() multiplexer: op 0 = semctl (with SunOS->Linux cmd
+ * renumbering), op 1 = semget, op 2 = semop.
+ */
+asmlinkage int sunos_semsys(int op, u32 arg1, u32 arg2, u32 arg3, void __user *ptr)
+{
+	union semun arg4;
+	int ret;
+
+	switch (op) {
+	case 0:
+		/* Most arguments match on a 1:1 basis but cmd doesn't */
+		switch(arg3) {
+		case 4:
+			arg3=GETPID; break;
+		case 5:
+			arg3=GETVAL; break;
+		case 6:
+			arg3=GETALL; break;
+		case 3:
+			arg3=GETNCNT; break;
+		case 7:
+			arg3=GETZCNT; break;
+		case 8:
+			arg3=SETVAL; break;
+		case 9:
+			arg3=SETALL; break;
+		}
+		/* sys_semctl(): */
+		/* value to modify semaphore to */
+		arg4.__pad = ptr;
+		ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4);
+		break;
+	case 1:
+		/* sys_semget(): */
+		ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
+		break;
+	case 2:
+		/* sys_semop(): */
+		ret = sys_semop((int)arg1, (struct sembuf __user *)(unsigned long)arg2,
+				(unsigned int) arg3);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	};
+	return ret;
+}
+
+/* 32-bit layout of struct msgbuf; mtext is variable length. */
+struct msgbuf32 {
+	s32 mtype;
+	char mtext[1];
+};
+
+/* 32-bit layout of struct ipc_perm. */
+struct ipc_perm32
+{
+	key_t    	  key;
+        compat_uid_t  uid;
+        compat_gid_t  gid;
+        compat_uid_t  cuid;
+        compat_gid_t  cgid;
+        compat_mode_t mode;
+        unsigned short  seq;
+};
+
+/* 32-bit layout of struct msqid_ds; the msg_first/msg_last/wwait/rwait
+ * kernel-pointer fields are carried as opaque u32 slots.
+ */
+struct msqid_ds32
+{
+        struct ipc_perm32 msg_perm;
+        u32 msg_first;
+        u32 msg_last;
+        compat_time_t msg_stime;
+        compat_time_t msg_rtime;
+        compat_time_t msg_ctime;
+        u32 wwait;
+        u32 rwait;
+        unsigned short msg_cbytes;
+        unsigned short msg_qnum;  
+        unsigned short msg_qbytes;
+        compat_ipc_pid_t msg_lspid;
+        compat_ipc_pid_t msg_lrpid;
+};
+
+/* Copy a user-space 32-bit msqid_ds into the kernel's native struct
+ * msqid_ds.  Returns 0 on success, -EFAULT on a faulting user access.
+ * The leading get_user() performs the access_ok() check for the
+ * unchecked __get_user() chain that follows.
+ */
+static inline int sunos_msqid_get(struct msqid_ds32 __user *user,
+				  struct msqid_ds *kern)
+{
+	if (get_user(kern->msg_perm.key, &user->msg_perm.key)		||
+	    __get_user(kern->msg_perm.uid, &user->msg_perm.uid)		||
+	    __get_user(kern->msg_perm.gid, &user->msg_perm.gid)		||
+	    __get_user(kern->msg_perm.cuid, &user->msg_perm.cuid)	||
+	    __get_user(kern->msg_perm.cgid, &user->msg_perm.cgid)	||
+	    __get_user(kern->msg_stime, &user->msg_stime)		||
+	    __get_user(kern->msg_rtime, &user->msg_rtime)		||
+	    __get_user(kern->msg_ctime, &user->msg_ctime)		||
+	    /* Bug fix: these five previously all read into
+	     * kern->msg_ctime, clobbering it and leaving the real
+	     * destination fields uninitialized.
+	     */
+	    __get_user(kern->msg_cbytes, &user->msg_cbytes)		||
+	    __get_user(kern->msg_qnum, &user->msg_qnum)			||
+	    __get_user(kern->msg_qbytes, &user->msg_qbytes)		||
+	    __get_user(kern->msg_lspid, &user->msg_lspid)		||
+	    __get_user(kern->msg_lrpid, &user->msg_lrpid))
+		return -EFAULT;
+	return 0;
+}
+
+/* Copy a kernel struct msqid_ds out to a user-space 32-bit msqid_ds.
+ * Returns 0 on success, -EFAULT on a faulting user access.  The
+ * leading put_user() performs the access_ok() check for the unchecked
+ * __put_user() chain that follows.
+ */
+static inline int sunos_msqid_put(struct msqid_ds32 __user *user,
+				  struct msqid_ds *kern)
+{
+	if (put_user(kern->msg_perm.key, &user->msg_perm.key)		||
+	    __put_user(kern->msg_perm.uid, &user->msg_perm.uid)		||
+	    __put_user(kern->msg_perm.gid, &user->msg_perm.gid)		||
+	    __put_user(kern->msg_perm.cuid, &user->msg_perm.cuid)	||
+	    __put_user(kern->msg_perm.cgid, &user->msg_perm.cgid)	||
+	    __put_user(kern->msg_stime, &user->msg_stime)		||
+	    __put_user(kern->msg_rtime, &user->msg_rtime)		||
+	    __put_user(kern->msg_ctime, &user->msg_ctime)		||
+	    /* Bug fix: these five previously all wrote the value of
+	     * kern->msg_ctime into the user's cbytes/qnum/qbytes/
+	     * lspid/lrpid slots instead of the matching kernel fields.
+	     */
+	    __put_user(kern->msg_cbytes, &user->msg_cbytes)		||
+	    __put_user(kern->msg_qnum, &user->msg_qnum)			||
+	    __put_user(kern->msg_qbytes, &user->msg_qbytes)		||
+	    __put_user(kern->msg_lspid, &user->msg_lspid)		||
+	    __put_user(kern->msg_lrpid, &user->msg_lrpid))
+		return -EFAULT;
+	return 0;
+}
+
+/* Pull a 32-bit msgbuf (type word + len bytes of text) in from
+ * userspace.  Returns 0 on success, -EFAULT on fault.
+ */
+static inline int sunos_msgbuf_get(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
+{
+	int err = get_user(kern->mtype, &user->mtype);
+
+	if (!err && __copy_from_user(kern->mtext, &user->mtext, len))
+		err = -EFAULT;
+	return err;
+}
+
+/* Push a kernel msgbuf (type word + len bytes of text) back out to a
+ * 32-bit userspace msgbuf.  Returns 0 on success, -EFAULT on fault.
+ */
+static inline int sunos_msgbuf_put(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
+{
+	int err = put_user(kern->mtype, &user->mtype);
+
+	if (!err && __copy_to_user(user->mtext, kern->mtext, len))
+		err = -EFAULT;
+	return err;
+}
+
+/* SunOS msgsys() demultiplexer:
+ *   op 0 = msgget, op 1 = msgctl, op 2 = msgrcv, op 3 = msgsnd.
+ * 32-bit user structures are translated around the generic SysV IPC
+ * implementations under a temporary KERNEL_DS segment.
+ */
+asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+	struct sparc_stackf32 __user *sp;
+	struct msqid_ds kds;
+	struct msgbuf *kmbuf;
+	mm_segment_t old_fs = get_fs();
+	u32 arg5;
+	int rval;
+
+	switch(op) {
+	case 0:
+		rval = sys_msgget((key_t)arg1, (int)arg2);
+		break;
+	case 1:
+		if (!sunos_msqid_get((struct msqid_ds32 __user *)(unsigned long)arg3, &kds)) {
+			set_fs(KERNEL_DS);
+			/* Bug fix: with KERNEL_DS in force we must pass the
+			 * kernel-space copy, not the original user pointer.
+			 */
+			rval = sys_msgctl((int)arg1, (int)arg2,
+					  (struct msqid_ds __user *)&kds);
+			set_fs(old_fs);
+			if (!rval)
+				rval = sunos_msqid_put((struct msqid_ds32 __user *)(unsigned long)arg3,
+						       &kds);
+		} else
+			rval = -EFAULT;
+		break;
+	case 2:
+		rval = -EFAULT;
+		kmbuf = kmalloc(sizeof(struct msgbuf) + arg3, GFP_KERNEL);
+		if (!kmbuf)
+			break;
+		/* The fifth argument lives in the caller's register window
+		 * save area on the user stack.
+		 */
+		sp = (struct sparc_stackf32 __user *)
+			(current_thread_info()->kregs->u_regs[UREG_FP] & 0xffffffffUL);
+		if (get_user(arg5, &sp->xxargs[0])) {
+			rval = -EFAULT;
+			kfree(kmbuf);
+			break;
+		}
+		set_fs(KERNEL_DS);
+		rval = sys_msgrcv((int)arg1, (struct msgbuf __user *) kmbuf,
+				  (size_t)arg3,
+				  (long)arg4, (int)arg5);
+		set_fs(old_fs);
+		/* Bug fix: sys_msgrcv() returns the received byte count on
+		 * success, so copy back for any non-negative return (the
+		 * old `!rval' test skipped every non-empty message).
+		 */
+		if (rval >= 0) {
+			int err = sunos_msgbuf_put((struct msgbuf32 __user *)(unsigned long)arg2,
+						   kmbuf, arg3);
+			if (err)
+				rval = err;
+		}
+		kfree(kmbuf);
+		break;
+	case 3:
+		rval = -EFAULT;
+		kmbuf = kmalloc(sizeof(struct msgbuf) + arg3, GFP_KERNEL);
+		if (!kmbuf)
+			break;
+		/* Bug fix: don't leak kmbuf when the copy-in fails. */
+		if (sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
+				     kmbuf, arg3)) {
+			kfree(kmbuf);
+			break;
+		}
+		set_fs(KERNEL_DS);
+		rval = sys_msgsnd((int)arg1, (struct msgbuf __user *) kmbuf,
+				  (size_t)arg3, (int)arg4);
+		set_fs(old_fs);
+		kfree(kmbuf);
+		break;
+	default:
+		rval = -EINVAL;
+		break;
+	}
+	return rval;
+}
+
+/* 32-bit (SunOS/compat) image of the kernel shmid_ds structure,
+ * translated to/from the native form by sunos_shmid_get()/_put().
+ */
+struct shmid_ds32 {
+        struct ipc_perm32       shm_perm;
+        int                     shm_segsz;	/* segment size in bytes */
+        compat_time_t         shm_atime;	/* last attach time */
+        compat_time_t         shm_dtime;	/* last detach time */
+        compat_time_t         shm_ctime;	/* last change time */
+        compat_ipc_pid_t    shm_cpid; 
+        compat_ipc_pid_t    shm_lpid; 
+        unsigned short          shm_nattch;	/* current attach count */
+};
+                                                        
+/* Copy a 32-bit shmid_ds image in from userspace into the native
+ * 64-bit kernel structure.  The leading get_user() performs the
+ * access_ok() check; the __get_user()s that follow rely on it since
+ * all fields are in the same user structure.  Returns 0 or -EFAULT.
+ */
+static inline int sunos_shmid_get(struct shmid_ds32 __user *user,
+				  struct shmid_ds *kern)
+{
+	if (get_user(kern->shm_perm.key, &user->shm_perm.key)		||
+	    __get_user(kern->shm_perm.uid, &user->shm_perm.uid)		||
+	    __get_user(kern->shm_perm.gid, &user->shm_perm.gid)		||
+	    __get_user(kern->shm_perm.cuid, &user->shm_perm.cuid)	||
+	    __get_user(kern->shm_perm.cgid, &user->shm_perm.cgid)	||
+	    __get_user(kern->shm_segsz, &user->shm_segsz)		||
+	    __get_user(kern->shm_atime, &user->shm_atime)		||
+	    __get_user(kern->shm_dtime, &user->shm_dtime)		||
+	    __get_user(kern->shm_ctime, &user->shm_ctime)		||
+	    __get_user(kern->shm_cpid, &user->shm_cpid)			||
+	    __get_user(kern->shm_lpid, &user->shm_lpid)			||
+	    __get_user(kern->shm_nattch, &user->shm_nattch))
+		return -EFAULT;
+	return 0;
+}
+
+/* Copy a native 64-bit shmid_ds back out to userspace in its 32-bit
+ * image form.  Field-by-field mirror of sunos_shmid_get().
+ * Returns 0 on success, -EFAULT on fault.
+ */
+static inline int sunos_shmid_put(struct shmid_ds32 __user *user,
+				  struct shmid_ds *kern)
+{
+	if (put_user(kern->shm_perm.key, &user->shm_perm.key)		||
+	    __put_user(kern->shm_perm.uid, &user->shm_perm.uid)		||
+	    __put_user(kern->shm_perm.gid, &user->shm_perm.gid)		||
+	    __put_user(kern->shm_perm.cuid, &user->shm_perm.cuid)	||
+	    __put_user(kern->shm_perm.cgid, &user->shm_perm.cgid)	||
+	    __put_user(kern->shm_segsz, &user->shm_segsz)		||
+	    __put_user(kern->shm_atime, &user->shm_atime)		||
+	    __put_user(kern->shm_dtime, &user->shm_dtime)		||
+	    __put_user(kern->shm_ctime, &user->shm_ctime)		||
+	    __put_user(kern->shm_cpid, &user->shm_cpid)			||
+	    __put_user(kern->shm_lpid, &user->shm_lpid)			||
+	    __put_user(kern->shm_nattch, &user->shm_nattch))
+		return -EFAULT;
+	return 0;
+}
+
+/* SunOS shmsys() demultiplexer:
+ *   op 0 = shmat, op 1 = shmctl, op 2 = shmdt, op 3 = shmget.
+ */
+asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
+{
+	mm_segment_t old_fs = get_fs();
+	struct shmid_ds ksds;
+	unsigned long raddr;
+	int rval;
+
+	switch (op) {
+	case 0:
+		/* shmat: attach the segment, returning the mapped address. */
+		rval = do_shmat((int)arg1, (char __user *)(unsigned long)arg2,
+				(int)arg3, &raddr);
+		if (!rval)
+			rval = (int) raddr;
+		break;
+	case 1:
+		/* shmctl: translate the 32-bit shmid_ds both ways around
+		 * the generic implementation, run under KERNEL_DS.
+		 */
+		if (sunos_shmid_get((struct shmid_ds32 __user *)(unsigned long)arg3, &ksds)) {
+			rval = -EFAULT;
+			break;
+		}
+		set_fs(KERNEL_DS);
+		rval = sys_shmctl((int) arg1, (int) arg2,
+				  (struct shmid_ds __user *) &ksds);
+		set_fs(old_fs);
+		if (!rval)
+			rval = sunos_shmid_put((struct shmid_ds32 __user *)(unsigned long)arg3,
+					       &ksds);
+		break;
+	case 2:
+		/* shmdt: detach. */
+		rval = sys_shmdt((char __user *)(unsigned long)arg1);
+		break;
+	case 3:
+		/* shmget: look up or create a segment. */
+		rval = sys_shmget((key_t)arg1, (int)arg2, (int)arg3);
+		break;
+	default:
+		rval = -EINVAL;
+		break;
+	}
+	return rval;
+}
+
+extern asmlinkage long sparc32_open(const char __user * filename, int flags, int mode);
+
+/* SunOS open(2): just the 32-bit sparc open with a compat pointer. */
+asmlinkage int sunos_open(u32 fname, int flags, int mode)
+{
+	return sparc32_open(compat_ptr(fname), flags, mode);
+}
+
+#define SUNOS_EWOULDBLOCK 35
+
+/* see the sunos man page read(2v) for an explanation
+   of this garbage. We use O_NDELAY to mark
+   file descriptors that have been set non-blocking 
+   using 4.2BSD style calls. (tridge) */
+
+/* Translate -EAGAIN to SunOS's EWOULDBLOCK, but only for descriptors
+ * marked O_NDELAY (i.e. set non-blocking via the 4.2BSD-style calls;
+ * see the read(2v) note above).  All other results pass through.
+ */
+static inline int check_nonblock(int ret, int fd)
+{
+	struct file *file;
+
+	if (ret != -EAGAIN)
+		return ret;
+
+	file = fget(fd);
+	if (file) {
+		if (file->f_flags & O_NDELAY)
+			ret = -SUNOS_EWOULDBLOCK;
+		fput(file);
+	}
+	return ret;
+}
+
+/* SunOS read(2v): sys_read() with EWOULDBLOCK translation. */
+asmlinkage int sunos_read(unsigned int fd, char __user *buf, u32 count)
+{
+	return check_nonblock(sys_read(fd, buf, count), fd);
+}
+
+/* SunOS readv(): compat readv with EWOULDBLOCK translation. */
+asmlinkage int sunos_readv(u32 fd, void __user *vector, s32 count)
+{
+	return check_nonblock(compat_sys_readv(fd, vector, count), fd);
+}
+
+/* SunOS write(2v): sys_write() with EWOULDBLOCK translation. */
+asmlinkage int sunos_write(unsigned int fd, char __user *buf, u32 count)
+{
+	return check_nonblock(sys_write(fd, buf, count), fd);
+}
+
+/* SunOS writev(): compat writev with EWOULDBLOCK translation. */
+asmlinkage int sunos_writev(u32 fd, void __user *vector, s32 count)
+{
+	return check_nonblock(compat_sys_writev(fd, vector, count), fd);
+}
+
+/* SunOS recv(): sys_recv() with EWOULDBLOCK translation. */
+asmlinkage int sunos_recv(u32 __fd, void __user *ubuf, int size, unsigned flags)
+{
+	int fd = (int) __fd;
+
+	return check_nonblock(sys_recv(fd, ubuf, size, flags), fd);
+}
+
+/* SunOS send(): sys_send() with EWOULDBLOCK translation. */
+asmlinkage int sunos_send(u32 __fd, void __user *buff, int len, unsigned flags)
+{
+	int fd = (int) __fd;
+
+	return check_nonblock(sys_send(fd, buff, len, flags), fd);
+}
+
+/* SunOS accept(): retry transient network-unreachable errors rather
+ * than surface them, and translate EWOULDBLOCK as usual.
+ */
+asmlinkage int sunos_accept(u32 __fd, struct sockaddr __user *sa, int __user *addrlen)
+{
+	int fd = (int) __fd;
+	int ret;
+
+	do {
+		ret = check_nonblock(sys_accept(fd, sa, addrlen), fd);
+	} while (ret == -ENETUNREACH || ret == -EHOSTUNREACH);
+
+	return ret;
+}
+
+#define SUNOS_SV_INTERRUPT 2
+
+/* SunOS sigaction(2) emulation.  SunOS SV_INTERRUPT has the inverse
+ * sense of Linux SA_RESTART, hence the XOR with SUNOS_SV_INTERRUPT
+ * when translating the flags in each direction.
+ * Returns 0 on success or a negative errno.
+ */
+asmlinkage int sunos_sigaction (int sig,
+				struct old_sigaction32 __user *act,
+				struct old_sigaction32 __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		compat_old_sigset_t mask;
+		u32 u_handler;
+
+		/* Bug fix: the sa_mask read was previously unchecked,
+		 * silently using garbage on fault; fold it into the chain.
+		 */
+		if (get_user(u_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
+			return -EFAULT;
+		new_ka.sa.sa_handler = compat_ptr(u_handler);
+		new_ka.sa.sa_restorer = NULL;
+		new_ka.ka_restorer = NULL;
+		siginitset(&new_ka.sa.sa_mask, mask);
+		new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
+		/* Bug fix: the sa_mask write was previously unchecked. */
+		if (put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/* SunOS setsockopt(): SunOS numbers the IP multicast options
+ * (ttl, membership, options 2..6) differently; shift them up to the
+ * Linux range (32..36) before calling the native implementation.
+ */
+asmlinkage int sunos_setsockopt(u32 __fd, u32 __level, u32 __optname,
+				char __user *optval, u32 __optlen)
+{
+	int level = (int) __level;
+	int tr_opt = (int) __optname;
+
+	if (level == SOL_IP && tr_opt >= 2 && tr_opt <= 6)
+		tr_opt += 30;
+
+	return sys_setsockopt((int) __fd, level, tr_opt,
+			      optval, (int) __optlen);
+}
+
+/* SunOS getsockopt(): same IP multicast option renumbering as
+ * sunos_setsockopt(), then dispatch to the compat implementation.
+ */
+asmlinkage int sunos_getsockopt(u32 __fd, u32 __level, u32 __optname,
+				char __user *optval, int __user *optlen)
+{
+	int level = (int) __level;
+	int tr_opt = (int) __optname;
+
+	if (level == SOL_IP && tr_opt >= 2 && tr_opt <= 6)
+		tr_opt += 30;
+
+	return compat_sys_getsockopt((int) __fd, level, tr_opt,
+				     optval, optlen);
+}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
new file mode 100644
index 0000000..48170f7
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.S
@@ -0,0 +1,251 @@
+/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
+ * systbls.S: System call entry point tables for OS compatibility.
+ *            The native Linux system call table lives here also.
+ *
+ * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+	.text
+	.align	4
+
+#ifdef CONFIG_COMPAT
+	/* First, the 32-bit Linux native syscall table. */
+
+	.globl sys_call_table32
+sys_call_table32:
+	/* Each .word is one table slot indexed by syscall number; the
+	 * ordering below is ABI and must not be changed.  Lines carry
+	 * five entries each; /#/ comments mark every tenth slot.
+	 */
+/*0*/	.word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
+/*5*/	.word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
+/*10*/  .word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
+/*15*/	.word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
+/*20*/	.word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
+/*25*/	.word compat_sys_time, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
+/*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
+	.word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
+/*40*/	.word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
+	.word sys32_umount, sys32_setgid16, sys32_getgid16, sys32_signal, sys32_geteuid16
+/*50*/	.word sys32_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
+	.word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
+/*60*/	.word sys32_umask, sys_chroot, compat_sys_newfstat, sys_fstat64, sys_getpagesize
+	.word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid
+/*70*/	.word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
+	.word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys32_getgroups16
+/*80*/	.word sys32_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64
+	.word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid
+/*90*/	.word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
+	.word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
+	.word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
+/*110*/	.word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
+	.word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
+/*120*/	.word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
+	.word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
+/*130*/	.word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
+	.word sys_nis_syscall, sys32_mkdir, sys_rmdir, sys32_utimes, sys_stat64
+/*140*/	.word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
+	.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
+/*150*/	.word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+	.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
+/*160*/	.word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
+	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+/*170*/	.word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+	.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
+/*180*/	.word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
+	.word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
+/*190*/	.word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
+	.word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
+/*200*/	.word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
+	.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
+/*210*/	.word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
+	.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
+/*220*/	.word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
+	.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
+/*230*/	.word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
+	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
+/*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
+	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
+/*250*/	.word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+	.word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+/*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
+	.word sys_timer_delete, sys32_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+/*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
+	.word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+/*280*/	.word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+
+#endif /* CONFIG_COMPAT */
+
+	/* Now the 64-bit native Linux syscall table. */
+
+	.align	4
+	.globl sys_call_table64, sys_call_table
+sys_call_table64:
+sys_call_table:
+	/* One slot per syscall number; ordering is ABI, do not reorder.
+	 * sys_nis_syscall fills slots with no 64-bit implementation.
+	 */
+/*0*/	.word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
+/*5*/	.word sys_open, sys_close, sys_wait4, sys_creat, sys_link
+/*10*/  .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
+/*15*/	.word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
+/*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
+/*25*/	.word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
+/*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
+	.word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
+/*40*/	.word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
+	.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
+/*50*/	.word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
+	.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
+/*60*/	.word sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize
+	.word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
+/*70*/	.word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
+	.word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
+/*80*/	.word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
+	.word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
+/*90*/	.word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
+	.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
+/*100*/	.word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
+	.word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
+/*110*/	.word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
+	.word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
+/*120*/	.word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
+	.word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
+/*130*/	.word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
+	.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
+/*140*/	.word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
+	.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
+/*150*/	.word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+	.word sys_nis_syscall, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
+/*160*/	.word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
+	.word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
+/*170*/	.word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
+	.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
+/*180*/	.word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
+	.word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname
+/*190*/	.word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
+	.word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
+/*200*/	.word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
+	.word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
+/*210*/	.word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
+	.word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
+/*220*/	.word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
+	.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
+/*230*/	.word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64
+	.word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
+/*240*/	.word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
+	.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
+/*250*/	.word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+	.word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+/*260*/	.word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+/*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+/*280*/	.word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
+
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+    defined(CONFIG_SOLARIS_EMUL_MODULE)
+	/* Now the 32-bit SunOS syscall table. */
+
+	.align 4
+	.globl sunos_sys_table
+sunos_sys_table:
+	/* Indexed by SunOS syscall number (three entries per line here).
+	 * sunos_nosys fills unimplemented slots; everything from ~185 up
+	 * is unimplemented.  Ordering is SunOS ABI -- do not reorder.
+	 */
+/*0*/	.word sunos_indir, sys32_exit, sys_fork
+	.word sunos_read, sunos_write, sunos_open
+	.word sys_close, sunos_wait4, sys_creat
+	.word sys_link, sys_unlink, sunos_execv
+	.word sys_chdir, sunos_nosys, sys32_mknod
+	.word sys_chmod, sys32_lchown16, sunos_brk
+	.word sunos_nosys, sys32_lseek, sunos_getpid
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_getuid, sunos_nosys, sys_ptrace
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sys_access, sunos_nosys, sunos_nosys
+	.word sys_sync, sys_kill, compat_sys_newstat
+	.word sunos_nosys, compat_sys_newlstat, sys_dup
+	.word sys_pipe, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_getgid
+	.word sunos_nosys, sunos_nosys
+/*50*/	.word sunos_nosys, sys_acct, sunos_nosys
+	.word sunos_mctl, sunos_ioctl, sys_reboot
+	.word sunos_nosys, sys_symlink, sys_readlink
+	.word sys32_execve, sys_umask, sys_chroot
+	.word compat_sys_newfstat, sunos_nosys, sys_getpagesize
+	.word sys_msync, sys_vfork, sunos_nosys
+	.word sunos_nosys, sunos_sbrk, sunos_sstk
+	.word sunos_mmap, sunos_vadvise, sys_munmap
+	.word sys_mprotect, sys_madvise, sys_vhangup
+	.word sunos_nosys, sys_mincore, sys32_getgroups16
+	.word sys32_setgroups16, sys_getpgrp, sunos_setpgrp
+	.word compat_sys_setitimer, sunos_nosys, sys_swapon
+	.word compat_sys_getitimer, sys_gethostname, sys_sethostname
+	.word sunos_getdtablesize, sys_dup2, sunos_nop
+	.word compat_sys_fcntl, sunos_select, sunos_nop
+	.word sys_fsync, sys32_setpriority, sys32_socket
+	.word sys32_connect, sunos_accept
+/*100*/	.word sys_getpriority, sunos_send, sunos_recv
+	.word sunos_nosys, sys32_bind, sunos_setsockopt
+	.word sys32_listen, sunos_nosys, sunos_sigaction
+	.word sunos_sigblock, sunos_sigsetmask, sys_sigpause
+	.word sys32_sigstack, sys32_recvmsg, sys32_sendmsg
+	.word sunos_nosys, sys32_gettimeofday, compat_sys_getrusage
+	.word sunos_getsockopt, sunos_nosys, sunos_readv
+	.word sunos_writev, sys32_settimeofday, sys32_fchown16
+	.word sys_fchmod, sys32_recvfrom, sys32_setreuid16
+	.word sys32_setregid16, sys_rename, sys_truncate
+	.word sys_ftruncate, sys_flock, sunos_nosys
+	.word sys32_sendto, sys32_shutdown, sys32_socketpair
+	.word sys_mkdir, sys_rmdir, sys32_utimes
+	.word sys32_sigreturn, sunos_nosys, sys32_getpeername
+	.word sunos_gethostid, sunos_nosys, compat_sys_getrlimit
+	.word compat_sys_setrlimit, sunos_killpg, sunos_nosys
+	.word sunos_nosys, sunos_nosys
+/*150*/	.word sys32_getsockname, sunos_nosys, sunos_nosys
+	.word sys_poll, sunos_nosys, sunos_nosys
+	.word sunos_getdirentries, compat_sys_statfs, compat_sys_fstatfs
+	.word sys_oldumount, sunos_nosys, sunos_nosys
+	.word sys_getdomainname, sys_setdomainname
+	.word sunos_nosys, sys_quotactl, sunos_nosys
+	.word sunos_nosys, sys_ustat, sunos_semsys
+	.word sunos_nosys, sunos_shmsys, sunos_audit
+	.word sunos_nosys, sunos_getdents, sys_setsid
+	.word sys_fchdir, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, compat_sys_sigpending, sunos_nosys
+	.word sys_setpgid, sunos_pathconf, sunos_fpathconf
+	.word sunos_sysconf, sunos_uname, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+/*200*/	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys
+/*250*/	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys, sunos_nosys, sunos_nosys
+	.word sunos_nosys
+#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
new file mode 100644
index 0000000..6a717d4
--- /dev/null
+++ b/arch/sparc64/kernel/time.c
@@ -0,0 +1,1195 @@
+/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
+ * time.c: UltraSparc timer and TOD clock support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Eddie C. Dost   (ecd@skynet.be)
+ *
+ * Based largely on code which is:
+ *
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mc146818rtc.h>
+#include <linux/delay.h>
+#include <linux/profile.h>
+#include <linux/bcd.h>
+#include <linux/jiffies.h>
+#include <linux/cpufreq.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+
+#include <asm/oplib.h>
+#include <asm/mostek.h>
+#include <asm/timer.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/sbus.h>
+#include <asm/fhc.h>
+#include <asm/pbm.h>
+#include <asm/ebus.h>
+#include <asm/isa.h>
+#include <asm/starfire.h>
+#include <asm/smp.h>
+#include <asm/sections.h>
+#include <asm/cpudata.h>
+
+/* Locks serializing access to the Mostek TOD chip and the RTC. */
+DEFINE_SPINLOCK(mostek_lock);
+DEFINE_SPINLOCK(rtc_lock);
+/* Register bases of the clock chips, filled in at probe time
+ * (presumably during clock probing later in this file -- not visible
+ * in this chunk; 0 means "not present").
+ */
+unsigned long mstk48t02_regs = 0UL;
+#ifdef CONFIG_PCI
+unsigned long ds1287_regs = 0UL;
+#endif
+
+extern unsigned long wall_jiffies;
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static unsigned long mstk48t08_regs = 0UL;
+static unsigned long mstk48t59_regs = 0UL;
+
+static int set_rtc_mmss(unsigned long);
+
+/* Placeholder tick source, active only until the real tick_ops are
+ * installed; always reports a zero tick count.
+ */
+static __init unsigned long dummy_get_tick(void)
+{
+	return 0UL;
+}
+
+/* Early-boot fallback ops; tick_ops is repointed to the real
+ * tick/stick/hbtick operations once the cpu's timer flavor is known
+ * (presumably in time_init(), outside this chunk -- hence __initdata
+ * is safe).
+ */
+static __initdata struct sparc64_tick_ops dummy_tick_ops = {
+	.get_tick	= dummy_get_tick,
+};
+
+struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
+
+/* Bit 63 of %tick/%stick: cleared below to grant user access to the
+ * counter (see tick_disable_protection()).
+ */
+#define TICK_PRIV_BIT	(1UL << 63)
+
+#ifdef CONFIG_SMP
+/* Return a PC for profiling samples: if the interrupted PC is inside
+ * a lock-acquisition routine, report the caller's return PC instead
+ * so lock spin time is charged to the caller, not the lock code.
+ */
+unsigned long profile_pc(struct pt_regs *regs)
+{
+	unsigned long pc = instruction_pointer(regs);
+
+	if (in_lock_functions(pc))
+		return regs->u_regs[UREG_RETPC];
+	return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+/* Clear TICK_PRIV_BIT in %tick so userland may read the counter. */
+static void tick_disable_protection(void)
+{
+	/* Set things up so user can access tick register for profiling
+	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
+	 * read back of %tick after writing it.
+	 */
+	__asm__ __volatile__(
+	"	ba,pt	%%xcc, 1f\n"
+	"	 nop\n"
+	"	.align	64\n"
+	"1:	rd	%%tick, %%g2\n"
+	"	add	%%g2, 6, %%g2\n"
+	"	andn	%%g2, %0, %%g2\n"
+	"	wrpr	%%g2, 0, %%tick\n"
+	"	rdpr	%%tick, %%g0"
+	: /* no outputs */
+	: "r" (TICK_PRIV_BIT)
+	: "g2");
+}
+
+/* Arm %tick_cmpr to fire `offset' ticks from now, with the privilege
+ * bit masked out of the current %tick value.  The branch to a
+ * 64-byte-aligned wr/rd pair is the Blackbird %tick_cmpr errata
+ * workaround (see tick_add_compare() for the full story).
+ */
+static void tick_init_tick(unsigned long offset)
+{
+	tick_disable_protection();
+
+	__asm__ __volatile__(
+	"	rd	%%tick, %%g1\n"
+	"	andn	%%g1, %1, %%g1\n"
+	"	ba,pt	%%xcc, 1f\n"
+	"	 add	%%g1, %0, %%g1\n"
+	"	.align	64\n"
+	"1:	wr	%%g1, 0x0, %%tick_cmpr\n"
+	"	rd	%%tick_cmpr, %%g0"
+	: /* no outputs */
+	: "r" (offset), "r" (TICK_PRIV_BIT)
+	: "g1");
+}
+
+/* Read %tick with the privilege bit stripped.  The dummy `mov' after
+ * the read mirrors the read-back style used elsewhere here --
+ * presumably an errata/pipeline precaution; TODO confirm.
+ */
+static unsigned long tick_get_tick(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd	%%tick, %0\n\t"
+			     "mov	%0, %0"
+			     : "=r" (ret));
+
+	return ret & ~TICK_PRIV_BIT;
+}
+
+/* Read the current %tick_cmpr (next timer-interrupt deadline). */
+static unsigned long tick_get_compare(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd	%%tick_cmpr, %0\n\t"
+			     "mov	%0, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+/* Advance %tick_cmpr by `adj' ticks and return the new compare
+ * value, using the aligned-write-plus-readback sequence required by
+ * the Blackbird errata described below.
+ */
+static unsigned long tick_add_compare(unsigned long adj)
+{
+	unsigned long new_compare;
+
+	/* Workaround for Spitfire Errata (#54 I think??), I discovered
+	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
+	 * number 103640.
+	 *
+	 * On Blackbird writes to %tick_cmpr can fail, the
+	 * workaround seems to be to execute the wr instruction
+	 * at the start of an I-cache line, and perform a dummy
+	 * read back from %tick_cmpr right after writing to it. -DaveM
+	 */
+	__asm__ __volatile__("rd	%%tick_cmpr, %0\n\t"
+			     "ba,pt	%%xcc, 1f\n\t"
+			     " add	%0, %1, %0\n\t"
+			     ".align	64\n"
+			     "1:\n\t"
+			     "wr	%0, 0, %%tick_cmpr\n\t"
+			     "rd	%%tick_cmpr, %%g0"
+			     : "=&r" (new_compare)
+			     : "r" (adj));
+
+	return new_compare;
+}
+
+/* Advance %tick itself by `adj' and re-arm %tick_cmpr to the new
+ * (privilege-bit-stripped) tick plus `offset'.  Returns the new
+ * %tick value.  Uses the same aligned wr + dummy readback as
+ * tick_add_compare() for the Blackbird %tick_cmpr errata.
+ */
+static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long new_tick, tmp;
+
+	/* Also need to handle Blackbird bug here too. */
+	__asm__ __volatile__("rd	%%tick, %0\n\t"
+			     "add	%0, %2, %0\n\t"
+			     "wrpr	%0, 0, %%tick\n\t"
+			     "andn	%0, %4, %1\n\t"
+			     "ba,pt	%%xcc, 1f\n\t"
+			     " add	%1, %3, %1\n\t"
+			     ".align	64\n"
+			     "1:\n\t"
+			     "wr	%1, 0, %%tick_cmpr\n\t"
+			     "rd	%%tick_cmpr, %%g0"
+			     : "=&r" (new_tick), "=&r" (tmp)
+			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+
+	return new_tick;
+}
+
+/* Ops for cpus whose timer is the %tick register; timer interrupts
+ * arrive on softint bit 0.
+ */
+static struct sparc64_tick_ops tick_operations = {
+	.init_tick	=	tick_init_tick,
+	.get_tick	=	tick_get_tick,
+	.get_compare	=	tick_get_compare,
+	.add_tick	=	tick_add_tick,
+	.add_compare	=	tick_add_compare,
+	.softint_mask	=	1UL << 0,
+};
+
+/* STICK flavor of init_tick: clear the privilege bit in %asr24
+ * (STICK) and arm %asr25 (STICK_CMPR) `offset' ticks from now.
+ */
+static void stick_init_tick(unsigned long offset)
+{
+	tick_disable_protection();
+
+	/* Let the user get at STICK too. */
+	__asm__ __volatile__(
+	"	rd	%%asr24, %%g2\n"
+	"	andn	%%g2, %0, %%g2\n"
+	"	wr	%%g2, 0, %%asr24"
+	: /* no outputs */
+	: "r" (TICK_PRIV_BIT)
+	: "g1", "g2");
+
+	__asm__ __volatile__(
+	"	rd	%%asr24, %%g1\n"
+	"	andn	%%g1, %1, %%g1\n"
+	"	add	%%g1, %0, %%g1\n"
+	"	wr	%%g1, 0x0, %%asr25"
+	: /* no outputs */
+	: "r" (offset), "r" (TICK_PRIV_BIT)
+	: "g1");
+}
+
+/* Read STICK (%asr24) with the privilege bit stripped. */
+static unsigned long stick_get_tick(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd	%%asr24, %0"
+			     : "=r" (ret));
+
+	return ret & ~TICK_PRIV_BIT;
+}
+
+/* Read the current STICK_CMPR (%asr25) value. */
+static unsigned long stick_get_compare(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd	%%asr25, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+/* STICK flavor of add_tick: advance %asr24 by `adj', then re-arm
+ * %asr25 to the new (privilege-bit-stripped) value plus `offset'.
+ * No Blackbird alignment dance is needed for the STICK registers.
+ */
+static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long new_tick, tmp;
+
+	__asm__ __volatile__("rd	%%asr24, %0\n\t"
+			     "add	%0, %2, %0\n\t"
+			     "wr	%0, 0, %%asr24\n\t"
+			     "andn	%0, %4, %1\n\t"
+			     "add	%1, %3, %1\n\t"
+			     "wr	%1, 0, %%asr25"
+			     : "=&r" (new_tick), "=&r" (tmp)
+			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+
+	return new_tick;
+}
+
/* Push the STICK compare (%asr25) forward by 'adj' ticks and return
 * the value written.
 */
static unsigned long stick_add_compare(unsigned long adj)
{
	unsigned long new_compare;

	__asm__ __volatile__("rd	%%asr25, %0\n\t"
			     "add	%0, %1, %0\n\t"
			     "wr	%0, 0, %%asr25"
			     : "=&r" (new_compare)
			     : "r" (adj));

	return new_compare;
}
+
/* Timer ops for CPUs with a system tick register in %asr24/%asr25.
 * The STICK timer softint arrives on bit 16.
 */
static struct sparc64_tick_ops stick_operations = {
	.init_tick	=	stick_init_tick,
	.get_tick	=	stick_get_tick,
	.get_compare	=	stick_get_compare,
	.add_tick	=	stick_add_tick,
	.add_compare	=	stick_add_compare,
	.softint_mask	=	1UL << 16,
};
+
+/* On Hummingbird the STICK/STICK_CMPR register is implemented
+ * in I/O space.  There are two 64-bit registers each, the
+ * first holds the low 32-bits of the value and the second holds
+ * the high 32-bits.
+ *
+ * Since STICK is constantly updating, we have to access it carefully.
+ *
+ * The sequence we use to read is:
+ * 1) read low
+ * 2) read high
+ * 3) read low again, if it rolled over increment high by 1
+ *
+ * Writing STICK safely is also tricky:
+ * 1) write low to zero
+ * 2) write high
+ * 3) write low
+ */
+#define HBIRD_STICKCMP_ADDR	0x1fe0000f060UL
+#define HBIRD_STICK_ADDR	0x1fe0000f070UL
+
/* Read the Hummingbird STICK from its I/O-space register pair:
 * low word first, then high, then low again; if the second low read
 * is below the first the counter rolled over between reads, so the
 * high word is incremented by one.  Result is high<<32 | low.
 */
static unsigned long __hbird_read_stick(void)
{
	unsigned long ret, tmp1, tmp2, tmp3;
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("ldxa	[%1] %5, %2\n\t"
			     "add	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %3\n\t"
			     "sub	%1, 0x8, %1\n\t"
			     "ldxa	[%1] %5, %4\n\t"
			     "cmp	%4, %2\n\t"
			     "blu,a,pn	%%xcc, 1f\n\t"
			     " add	%3, 1, %3\n"
			     "1:\n\t"
			     "sllx	%3, 32, %3\n\t"
			     "or	%3, %4, %0\n\t"
			     : "=&r" (ret), "=&r" (addr),
			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));

	return ret;
}
+
/* Read the Hummingbird STICK_CMPR register pair (low then high) and
 * combine into a 64-bit value.  Unlike STICK itself the compare
 * register is not free-running, so no rollover handling is needed.
 */
static unsigned long __hbird_read_compare(void)
{
	unsigned long low, high;
	unsigned long addr = HBIRD_STICKCMP_ADDR;

	__asm__ __volatile__("ldxa	[%2] %3, %0\n\t"
			     "add	%2, 0x8, %2\n\t"
			     "ldxa	[%2] %3, %1"
			     : "=&r" (low), "=&r" (high), "=&r" (addr)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));

	return (high << 32UL) | low;
}
+
/* Write 'val' to the Hummingbird STICK register pair using the safe
 * sequence described above: zero the low word, write the high word,
 * then write the real low word.
 */
static void __hbird_write_stick(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("stxa	%%g0, [%0] %4\n\t"
			     "add	%0, 0x8, %0\n\t"
			     "stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}
+
/* Write 'val' to the Hummingbird STICK_CMPR register pair: high word
 * first (addr starts at the +0x8 slot), then the low word.
 */
static void __hbird_write_compare(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;

	__asm__ __volatile__("stxa	%3, [%0] %4\n\t"
			     "sub	%0, 0x8, %0\n\t"
			     "stxa	%2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}
+
+static void hbtick_init_tick(unsigned long offset)
+{
+	unsigned long val;
+
+	tick_disable_protection();
+
+	/* XXX This seems to be necessary to 'jumpstart' Hummingbird
+	 * XXX into actually sending STICK interrupts.  I think because
+	 * XXX of how we store %tick_cmpr in head.S this somehow resets the
+	 * XXX {TICK + STICK} interrupt mux.  -DaveM
+	 */
+	__hbird_write_stick(__hbird_read_stick());
+
+	val = __hbird_read_stick() & ~TICK_PRIV_BIT;
+	__hbird_write_compare(val + offset);
+}
+
+static unsigned long hbtick_get_tick(void)
+{
+	return __hbird_read_stick() & ~TICK_PRIV_BIT;
+}
+
/* Current Hummingbird STICK compare value, returned as-is. */
static unsigned long hbtick_get_compare(void)
{
	unsigned long cmp = __hbird_read_compare();

	return cmp;
}
+
+static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long val;
+
+	val = __hbird_read_stick() + adj;
+	__hbird_write_stick(val);
+
+	val &= ~TICK_PRIV_BIT;
+	__hbird_write_compare(val + offset);
+
+	return val;
+}
+
+static unsigned long hbtick_add_compare(unsigned long adj)
+{
+	unsigned long val = __hbird_read_compare() + adj;
+
+	val &= ~TICK_PRIV_BIT;
+	__hbird_write_compare(val);
+
+	return val;
+}
+
/* Timer ops for Hummingbird (UltraSPARC-IIe), whose STICK registers
 * live in I/O space.  Timer softint arrives on bit 0.
 */
static struct sparc64_tick_ops hbtick_operations = {
	.init_tick	=	hbtick_init_tick,
	.get_tick	=	hbtick_get_tick,
	.get_compare	=	hbtick_get_compare,
	.add_tick	=	hbtick_add_tick,
	.add_compare	=	hbtick_add_compare,
	.softint_mask	=	1UL << 0,
};
+
+/* timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ *
+ * NOTE: On SUN5 systems the ticker interrupt comes in using 2
+ *       interrupts, one at level14 and one with softint bit 0.
+ */
/* Counter increments per timer interrupt (clock / HZ); set in
 * sparc64_init_timers().
 */
unsigned long timer_tick_offset;
/* Compare value armed for the next timer interrupt. */
unsigned long timer_tick_compare;

/* nsec-per-tick scaled by 2^SPARC64_NSEC_PER_CYC_SHIFT; computed in
 * time_init(), consumed by sched_clock().
 */
static unsigned long timer_ticks_per_nsec_quotient;

/* Timer tick period in microseconds. */
#define TICK_SIZE (tick_nsec / 1000)
+
/* Write the kernel time back to the hardware clock roughly every 11
 * minutes (660s), but only when NTP considers the clock synchronized
 * and we are within half a tick of the middle of a second (the RTC
 * set is assumed to take effect on the next second boundary).
 * Caller holds xtime_lock.
 */
static inline void timer_check_rtc(void)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update;

	/* Determine when to update the Mostek clock. */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
			/* do it again in 60 s */
	}
}
+
/* Master timer interrupt handler.  Advances jiffies once per elapsed
 * tick period: the loop keeps calling do_timer() and re-arming the
 * compare register until the newly armed compare lies in the future,
 * so no ticks are lost when interrupt delivery was delayed.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	unsigned long ticks, pstate;

	write_seqlock(&xtime_lock);

	do {
#ifndef CONFIG_SMP
		/* On SMP these are driven per-cpu elsewhere. */
		profile_tick(CPU_PROFILING, regs);
		update_process_times(user_mode(regs));
#endif
		do_timer(regs);

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		/* Arm next compare, then sample the counter so the
		 * loop condition sees a value taken after the arm.
		 */
		timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
		ticks = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(ticks, timer_tick_compare));

	timer_check_rtc();

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}
+
+#ifdef CONFIG_SMP
/* SMP variant called from the tick-receiving cpu's interrupt path.
 * Advances jiffies and tracks where the next compare would be, but
 * deliberately does not write the compare register here.
 */
void timer_tick_interrupt(struct pt_regs *regs)
{
	write_seqlock(&xtime_lock);

	do_timer(regs);

	/*
	 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
	 */
	timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;

	timer_check_rtc();

	write_sequnlock(&xtime_lock);
}
+#endif
+
+/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void)
{
	unsigned long regs = mstk48t02_regs;
	u8 sec, tmp;
	int i, count;

	prom_printf("CLOCK: Clock was stopped. Kick start ");

	spin_lock_irq(&mostek_lock);

	/* Turn on the kick start bit to start the oscillator.
	 * All register updates are bracketed by setting and then
	 * clearing MSTK_CREG_WRITE in the control register.
	 */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_SEC);
	tmp &= ~MSTK_STOP;
	mostek_write(regs + MOSTEK_SEC, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp |= MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Delay to allow the clock oscillator to start: busy-wait for
	 * the seconds register to change three times.
	 */
	sec = MSTK_REG_SEC(regs);
	for (i = 0; i < 3; i++) {
		while (sec == MSTK_REG_SEC(regs))
			for (count = 0; count < 100000; count++)
				/* nothing */ ;
		prom_printf(".");
		sec = MSTK_REG_SEC(regs);
	}
	prom_printf("\n");

	spin_lock_irq(&mostek_lock);

	/* Turn off kick start and set a "valid" time and date. */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp &= ~MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	MSTK_SET_REG_SEC(regs,0);
	MSTK_SET_REG_MIN(regs,0);
	MSTK_SET_REG_HOUR(regs,0);
	MSTK_SET_REG_DOW(regs,5);
	MSTK_SET_REG_DOM(regs,1);
	MSTK_SET_REG_MONTH(regs,8);
	MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Ensure the kick start bit is off. If it isn't, turn it off. */
	while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
		prom_printf("CLOCK: Kick start still on!\n");

		spin_lock_irq(&mostek_lock);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp |= MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		tmp = mostek_read(regs + MOSTEK_HOUR);
		tmp &= ~MSTK_KICK_START;
		mostek_write(regs + MOSTEK_HOUR, tmp);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}

	prom_printf("CLOCK: Kick start procedure successful.\n");
}
+
+/* Return nonzero if the clock chip battery is low. */
/* Return nonzero if the clock chip battery is low.
 *
 * Probe by writing the complement of an NVRAM byte and reading it
 * back: if the read-back still equals the original, the write was
 * blocked -- which (per the "Was the write blocked?" check below)
 * is taken to indicate a low battery.  The original byte is restored
 * either way.
 */
static int __init has_low_battery(void)
{
	unsigned long regs = mstk48t02_regs;
	u8 data1, data2;

	spin_lock_irq(&mostek_lock);

	data1 = mostek_read(regs + MOSTEK_EEPROM);	/* Read some data. */
	mostek_write(regs + MOSTEK_EEPROM, ~data1);	/* Write back the complement. */
	data2 = mostek_read(regs + MOSTEK_EEPROM);	/* Read back the complement. */
	mostek_write(regs + MOSTEK_EEPROM, data1);	/* Restore original value. */

	spin_unlock_irq(&mostek_lock);

	return (data1 == data2);	/* Was the write blocked? */
}
+
+/* Probe for the real time clock chip. */
/* Read the time of day from whichever RTC chip was probed (Mostek if
 * mstk48t02_regs is set, otherwise a Dallas DS12887-family chip) and
 * initialize xtime / wall_to_monotonic from it.
 *
 * NOTE(locking): mostek_lock is acquired in the mregs branch below
 * and held across the xtime update; it is released only in the final
 * "if (mregs)" block.  The Dallas path never takes it.
 */
static void __init set_system_time(void)
{
	unsigned int year, mon, day, hour, min, sec;
	unsigned long mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	u8 tmp;

	if (!mregs && !dregs) {
		prom_printf("Something wrong, clock regs not mapped yet.\n");
		prom_halt();
	}		

	if (mregs) {
		spin_lock_irq(&mostek_lock);

		/* Traditional Mostek chip.  Latch the time registers by
		 * setting MSTK_CREG_READ, then read each field.
		 */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		sec = MSTK_REG_SEC(mregs);
		min = MSTK_REG_MIN(mregs);
		hour = MSTK_REG_HOUR(mregs);
		day = MSTK_REG_DOM(mregs);
		mon = MSTK_REG_MONTH(mregs);
		year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
	} else {
		int i;

		/* Dallas 12887 RTC chip. */

		/* Stolen from arch/i386/kernel/time.c, see there for
		 * credits and descriptive comments.
		 */
		/* Wait for an update-in-progress cycle to begin... */
		for (i = 0; i < 1000000; i++) {
			if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
				break;
			udelay(10);
		}
		/* ...and then to finish, so a fresh second has begun. */
		for (i = 0; i < 1000000; i++) {
			if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
				break;
			udelay(10);
		}
		/* Re-read until the seconds value is stable. */
		do {
			sec  = CMOS_READ(RTC_SECONDS);
			min  = CMOS_READ(RTC_MINUTES);
			hour = CMOS_READ(RTC_HOURS);
			day  = CMOS_READ(RTC_DAY_OF_MONTH);
			mon  = CMOS_READ(RTC_MONTH);
			year = CMOS_READ(RTC_YEAR);
		} while (sec != CMOS_READ(RTC_SECONDS));
		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			BCD_TO_BIN(sec);
			BCD_TO_BIN(min);
			BCD_TO_BIN(hour);
			BCD_TO_BIN(day);
			BCD_TO_BIN(mon);
			BCD_TO_BIN(year);
		}
		/* Two-digit year: values below 70 are 20xx. */
		if ((year += 1900) < 1970)
			year += 100;
	}

	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
	set_normalized_timespec(&wall_to_monotonic,
 	                        -xtime.tv_sec, -xtime.tv_nsec);

	if (mregs) {
		/* Unlatch the Mostek time registers and drop the lock. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}
}
+
+void __init clock_probe(void)
+{
+	struct linux_prom_registers clk_reg[2];
+	char model[128];
+	int node, busnd = -1, err;
+	unsigned long flags;
+	struct linux_central *cbus;
+#ifdef CONFIG_PCI
+	struct linux_ebus *ebus = NULL;
+	struct sparc_isa_bridge *isa_br = NULL;
+#endif
+	static int invoked;
+
+	if (invoked)
+		return;
+	invoked = 1;
+
+
+	if (this_is_starfire) {
+		/* davem suggests we keep this within the 4M locked kernel image */
+		static char obp_gettod[256];
+		static u32 unix_tod;
+
+		sprintf(obp_gettod, "h# %08x unix-gettod",
+			(unsigned int) (long) &unix_tod);
+		prom_feval(obp_gettod);
+		xtime.tv_sec = unix_tod;
+		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+		set_normalized_timespec(&wall_to_monotonic,
+		                        -xtime.tv_sec, -xtime.tv_nsec);
+		return;
+	}
+
+	local_irq_save(flags);
+
+	cbus = central_bus;
+	if (cbus != NULL)
+		busnd = central_bus->child->prom_node;
+
+	/* Check FHC Central then EBUSs then ISA bridges then SBUSs.
+	 * That way we handle the presence of multiple properly.
+	 *
+	 * As a special case, machines with Central must provide the
+	 * timer chip there.
+	 */
+#ifdef CONFIG_PCI
+	if (ebus_chain != NULL) {
+		ebus = ebus_chain;
+		if (busnd == -1)
+			busnd = ebus->prom_node;
+	}
+	if (isa_chain != NULL) {
+		isa_br = isa_chain;
+		if (busnd == -1)
+			busnd = isa_br->prom_node;
+	}
+#endif
+	if (sbus_root != NULL && busnd == -1)
+		busnd = sbus_root->prom_node;
+
+	if (busnd == -1) {
+		prom_printf("clock_probe: problem, cannot find bus to search.\n");
+		prom_halt();
+	}
+
+	node = prom_getchild(busnd);
+
+	while (1) {
+		if (!node)
+			model[0] = 0;
+		else
+			prom_getstring(node, "model", model, sizeof(model));
+		if (strcmp(model, "mk48t02") &&
+		    strcmp(model, "mk48t08") &&
+		    strcmp(model, "mk48t59") &&
+		    strcmp(model, "m5819") &&
+		    strcmp(model, "m5819p") &&
+		    strcmp(model, "m5823") &&
+		    strcmp(model, "ds1287")) {
+			if (cbus != NULL) {
+				prom_printf("clock_probe: Central bus lacks timer chip.\n");
+				prom_halt();
+			}
+
+		   	if (node != 0)
+				node = prom_getsibling(node);
+#ifdef CONFIG_PCI
+			while ((node == 0) && ebus != NULL) {
+				ebus = ebus->next;
+				if (ebus != NULL) {
+					busnd = ebus->prom_node;
+					node = prom_getchild(busnd);
+				}
+			}
+			while ((node == 0) && isa_br != NULL) {
+				isa_br = isa_br->next;
+				if (isa_br != NULL) {
+					busnd = isa_br->prom_node;
+					node = prom_getchild(busnd);
+				}
+			}
+#endif
+			if (node == 0) {
+				prom_printf("clock_probe: Cannot find timer chip\n");
+				prom_halt();
+			}
+			continue;
+		}
+
+		err = prom_getproperty(node, "reg", (char *)clk_reg,
+				       sizeof(clk_reg));
+		if(err == -1) {
+			prom_printf("clock_probe: Cannot get Mostek reg property\n");
+			prom_halt();
+		}
+
+		if (cbus != NULL) {
+			apply_fhc_ranges(central_bus->child, clk_reg, 1);
+			apply_central_ranges(central_bus, clk_reg, 1);
+		}
+#ifdef CONFIG_PCI
+		else if (ebus != NULL) {
+			struct linux_ebus_device *edev;
+
+			for_each_ebusdev(edev, ebus)
+				if (edev->prom_node == node)
+					break;
+			if (edev == NULL) {
+				if (isa_chain != NULL)
+					goto try_isa_clock;
+				prom_printf("%s: Mostek not probed by EBUS\n",
+					    __FUNCTION__);
+				prom_halt();
+			}
+
+			if (!strcmp(model, "ds1287") ||
+			    !strcmp(model, "m5819") ||
+			    !strcmp(model, "m5819p") ||
+			    !strcmp(model, "m5823")) {
+				ds1287_regs = edev->resource[0].start;
+			} else {
+				mstk48t59_regs = edev->resource[0].start;
+				mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+			}
+			break;
+		}
+		else if (isa_br != NULL) {
+			struct sparc_isa_device *isadev;
+
+try_isa_clock:
+			for_each_isadev(isadev, isa_br)
+				if (isadev->prom_node == node)
+					break;
+			if (isadev == NULL) {
+				prom_printf("%s: Mostek not probed by ISA\n");
+				prom_halt();
+			}
+			if (!strcmp(model, "ds1287") ||
+			    !strcmp(model, "m5819") ||
+			    !strcmp(model, "m5819p") ||
+			    !strcmp(model, "m5823")) {
+				ds1287_regs = isadev->resource.start;
+			} else {
+				mstk48t59_regs = isadev->resource.start;
+				mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+			}
+			break;
+		}
+#endif
+		else {
+			if (sbus_root->num_sbus_ranges) {
+				int nranges = sbus_root->num_sbus_ranges;
+				int rngc;
+
+				for (rngc = 0; rngc < nranges; rngc++)
+					if (clk_reg[0].which_io ==
+					    sbus_root->sbus_ranges[rngc].ot_child_space)
+						break;
+				if (rngc == nranges) {
+					prom_printf("clock_probe: Cannot find ranges for "
+						    "clock regs.\n");
+					prom_halt();
+				}
+				clk_reg[0].which_io =
+					sbus_root->sbus_ranges[rngc].ot_parent_space;
+				clk_reg[0].phys_addr +=
+					sbus_root->sbus_ranges[rngc].ot_parent_base;
+			}
+		}
+
+		if(model[5] == '0' && model[6] == '2') {
+			mstk48t02_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+		} else if(model[5] == '0' && model[6] == '8') {
+			mstk48t08_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+			mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
+		} else {
+			mstk48t59_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+			mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+		}
+		break;
+	}
+
+	if (mstk48t02_regs != 0UL) {
+		/* Report a low battery voltage condition. */
+		if (has_low_battery())
+			prom_printf("NVRAM: Low battery voltage!\n");
+
+		/* Kick start the clock if it is completely stopped. */
+		if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
+			kick_start_clock();
+	}
+
+	set_system_time();
+	
+	local_irq_restore(flags);
+}
+
+/* This is gets the master TICK_INT timer going. */
/* This is gets the master TICK_INT timer going.
 *
 * Select the tick_ops implementation for this CPU type and read the
 * corresponding clock frequency from the PROM.  Spitfire-class CPUs
 * normally use %tick; the Hummingbird variant (manufacturer 0x17,
 * implementation 0x13 in %ver) uses its I/O-space STICK; everything
 * else uses the %asr24 STICK.  Returns the tick frequency in Hz.
 */
static unsigned long sparc64_init_timers(void)
{
	unsigned long clock;
	int node;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	if (tlb_type == spitfire) {
		unsigned long ver, manuf, impl;

		__asm__ __volatile__ ("rdpr %%ver, %0"
				      : "=&r" (ver));
		manuf = ((ver >> 48) & 0xffff);
		impl = ((ver >> 32) & 0xffff);
		if (manuf == 0x17 && impl == 0x13) {
			/* Hummingbird, aka Ultra-IIe */
			tick_ops = &hbtick_operations;
			node = prom_root_node;
			clock = prom_getint(node, "stick-frequency");
		} else {
			tick_ops = &tick_operations;
			cpu_find_by_instance(0, &node, NULL);
			clock = prom_getint(node, "clock-frequency");
		}
	} else {
		tick_ops = &stick_operations;
		node = prom_root_node;
		clock = prom_getint(node, "stick-frequency");
	}
	/* Ticks between timer interrupts. */
	timer_tick_offset = clock / HZ;

#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	return clock;
}
+
/* Register 'cfunc' as the TICK_INT interrupt handler, arm the first
 * timer compare with interrupts disabled at the %pstate level, and
 * finally enable local interrupts.
 */
static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
{
	unsigned long pstate;
	int err;

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
			  "timer", NULL);

	if (err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(timer_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	local_irq_enable();
}
+
/* Per-cpu reference values captured at the first cpufreq transition,
 * used to rescale udelay_val and clock_tick as the frequency changes.
 * ref_freq == 0 means "not yet captured".
 */
struct freq_table {
	unsigned long udelay_val_ref;	/* udelay_val at ref_freq */
	unsigned long clock_tick_ref;	/* clock_tick at ref_freq */
	unsigned int ref_freq;		/* frequency the refs were taken at */
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
+
+unsigned long sparc64_get_clock_tick(unsigned int cpu)
+{
+	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+
+	if (ft->clock_tick_ref)
+		return ft->clock_tick_ref;
+	return cpu_data(cpu).clock_tick;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
/* cpufreq transition callback.  On the first notification for a cpu,
 * capture reference udelay_val/clock_tick values at the old
 * frequency; then rescale both values to the new frequency.  The
 * scaling is applied before an increase and after a decrease (and on
 * resume), so the live values never lag below the true rate.
 */
static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				    void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned int cpu = freq->cpu;
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->udelay_val_ref = cpu_data(cpu).udelay_val;
		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		cpu_data(cpu).udelay_val =
			cpufreq_scale(ft->udelay_val_ref,
				      ft->ref_freq,
				      freq->new);
		cpu_data(cpu).clock_tick =
			cpufreq_scale(ft->clock_tick_ref,
				      ft->ref_freq,
				      freq->new);
	}

	return 0;
}
+
/* Registered with the cpufreq core in time_init(). */
static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call	= sparc64_cpufreq_notifier
};
+
+#endif /* CONFIG_CPU_FREQ */
+
/* Time interpolator backed by the CPU tick counter; frequency is
 * filled in by time_init() once the tick rate is known.
 */
static struct time_interpolator sparc64_cpu_interpolator = {
	.source		=	TIME_SOURCE_CPU,
	.shift		=	16,
	.mask		=	0xffffffffffffffffLL
};
+
+/* The quotient formula is taken from the IA64 port. */
+#define SPARC64_NSEC_PER_CYC_SHIFT	30UL
/* The quotient formula is taken from the IA64 port. */
#define SPARC64_NSEC_PER_CYC_SHIFT	30UL
/* Probe the timer hardware, register the time interpolator, start
 * the timer tick, and precompute the fixed-point ticks->nsec
 * quotient (the '+ clock/2' rounds to nearest).
 */
void __init time_init(void)
{
	unsigned long clock = sparc64_init_timers();

	sparc64_cpu_interpolator.frequency = clock;
	register_time_interpolator(&sparc64_cpu_interpolator);

	/* Now that the interpolator is registered, it is
	 * safe to start the timer ticking.
	 */
	sparc64_start_timers(timer_interrupt);

	timer_ticks_per_nsec_quotient =
		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
		  (clock / 2)) / clock);

#ifdef CONFIG_CPU_FREQ
	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif
}
+
+unsigned long long sched_clock(void)
+{
+	unsigned long ticks = tick_ops->get_tick();
+
+	return (ticks * timer_ticks_per_nsec_quotient)
+		>> SPARC64_NSEC_PER_CYC_SHIFT;
+}
+
/* Write the minutes/seconds of 'nowtime' (seconds since the epoch)
 * back to the RTC chip -- Mostek if present, else Dallas/CMOS.  Only
 * minutes and seconds are touched so the chip's hour field (and any
 * unknown timezone offset in it) is preserved; this requires the RTC
 * to be within 15 minutes of the true time.  Returns 0 on success,
 * -1 when no chip is mapped or the delta is too large to fix safely.
 */
static int set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, chip_minutes;
	unsigned long mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	unsigned long flags;
	u8 tmp;

	/* 
	 * Not having a register set can lead to trouble.
	 * Also starfire doesn't have a tod clock.
	 */
	if (!mregs && !dregs) 
		return -1;

	if (mregs) {
		spin_lock_irqsave(&mostek_lock, flags);

		/* Read the current RTC minutes. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		chip_minutes = MSTK_REG_MIN(mregs);

		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		/*
		 * since we're only adjusting minutes and seconds,
		 * don't interfere with hour overflow. This avoids
		 * messing with unknown time zones but requires your
		 * RTC not to be off by more than 15 minutes
		 */
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;	/* correct for half hour time zone */
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			/* Bracket the update with MSTK_CREG_WRITE. */
			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp |= MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			MSTK_SET_REG_SEC(mregs,real_seconds);
			MSTK_SET_REG_MIN(mregs,real_minutes);

			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp &= ~MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			spin_unlock_irqrestore(&mostek_lock, flags);

			return 0;
		} else {
			spin_unlock_irqrestore(&mostek_lock, flags);

			return -1;
		}
	} else {
		int retval = 0;
		unsigned char save_control, save_freq_select;

		/* Stolen from arch/i386/kernel/time.c, see there for
		 * credits and descriptive comments.
		 */
		spin_lock_irqsave(&rtc_lock, flags);
		save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
		CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

		save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

		chip_minutes = CMOS_READ(RTC_MINUTES);
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
			BCD_TO_BIN(chip_minutes);
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
				BIN_TO_BCD(real_seconds);
				BIN_TO_BCD(real_minutes);
			}
			CMOS_WRITE(real_seconds,RTC_SECONDS);
			CMOS_WRITE(real_minutes,RTC_MINUTES);
		} else {
			printk(KERN_WARNING
			       "set_rtc_mmss: can't update from %d to %d\n",
			       chip_minutes, real_minutes);
			retval = -1;
		}

		/* Restore control/divider state, re-enabling updates. */
		CMOS_WRITE(save_control, RTC_CONTROL);
		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		return retval;
	}
}
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
new file mode 100644
index 0000000..2c8f934
--- /dev/null
+++ b/arch/sparc64/kernel/trampoline.S
@@ -0,0 +1,368 @@
+/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
+ * trampoline.S: Jump start slave processors on sparc64.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
+#include <asm/pstate.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/mmu.h>
+
	/* PROM service and method name strings used for the OBP
	 * call-method invocations below.
	 */
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	.text
	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
	/* Entry point for secondary (slave) CPUs.  %o0 carries a
	 * pointer handed over by the master (dereferenced below to
	 * obtain the thread info base).  Per-CPU-type setup first,
	 * then the shared startup_continue path.
	 */
sparc64_cpu_startup:
	flushw

	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings.  */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

	/* Base Cheetah: program the dispatch control register and
	 * the D-cache unit control register explicitly.
	 */
cheetah_startup:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync

	/* Clear all TSB extension registers. */
cheetah_generic_startup:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	/* Disable STICK_INT interrupts. */
	sethi		%hi(0x80000000), %g5
	sllx		%g5, 32, %g5
	wr		%g5, %asr25

	ba,pt		%xcc, startup_continue
	 nop

spitfire_startup:
	mov		(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa		%g1, [%g0] ASI_LSU_CONTROL
	membar		#Sync

startup_continue:
	wrpr		%g0, 15, %pil

	/* Park %tick_cmpr with its interrupt-disable (high) bit set. */
	sethi		%hi(0x80000000), %g2
	sllx		%g2, 32, %g2
	wr		%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 2 consequetive entries if we are 'bigkernel'.
	 */
	mov		%o0, %l0

	/* Serialize PROM entry among CPUs via a simple ldstub lock. */
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn		%g1, 1b
	 membar		#StoreLoad | #StoreStore

	/* Switch to the PROM's stack (from p1275buf) and build the
	 * call-method argument array at %sp + bias + 128:
	 *   [0x00] service name, [0x08] #args, [0x10] #results,
	 *   [0x18] method, [0x20] mmu ihandle, [0x28] vaddr,
	 *   [0x30] tte data, [0x38] tlb entry index.
	 */
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x10], %l2
	mov		%sp, %l1
	add		%l2, -(192 + 128), %sp
	flushw

	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	/* TLB entry 15 on spitfire, 63 on cheetah. */
	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* For 'bigkernel', lock a second ITLB entry covering
	 * KERNBASE + 4MB.
	 */
	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	cmp		%g2, 0
	be,pt		%icc, do_dtlb
	 nop

	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* Same two locked entries for the DTLB. */
do_dtlb:
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:

	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	cmp		%g2, 0
	be,pt		%icc, do_unlock
	 nop

	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:

	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* Release the PROM entry lock and restore our own stack. */
do_unlock:
	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]
	membar		#StoreStore | #StoreLoad

	mov		%l1, %sp
	flushw

	mov		%l0, %o0

	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr		%g0, 0, %fprs

	/* XXX Buggy PROM... */
	srl		%o0, 0, %o0
	ldx		[%o0], %g6

	wr		%g0, ASI_P, %asi

	/* Clear primary and secondary MMU contexts. */
	mov		PRIMARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync
	mov		SECONDARY_CONTEXT, %g7
	stxa		%g0, [%g7] ASI_DMMU
	membar		#Sync

	/* Set up the kernel stack at the top of the thread union. */
	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	wrpr		%g0, 0, %wstate
	wrpr		%g0, 0, %tl

	/* Setup the trap globals, then we can resurface. */
	rdpr		%pstate, %o1
	mov		%g6, %o2
	wrpr		%o1, PSTATE_AG, %pstate
	sethi		%hi(sparc64_ttable_tl0), %g5
	wrpr		%g5, %tba
	mov		%o2, %g6

	wrpr		%o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov		TSB_REG, %g1
	stxa		%g0, [%g1] ASI_DMMU
	membar		#Sync
	mov		TLB_SFSR, %g1
	sethi		%uhi(KERN_HIGHBITS), %g2
	or		%g2, %ulo(KERN_HIGHBITS), %g2
	sllx		%g2, 32, %g2
	or		%g2, KERN_LOWBITS, %g2

	BRANCH_IF_ANY_CHEETAH(g3,g7,9f)

	ba,pt		%xcc, 1f
	 nop

9:
	sethi		%uhi(VPTE_BASE_CHEETAH), %g3
	or		%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt		%xcc, 2f
	 sllx		%g3, 32, %g3
1:
	sethi		%uhi(VPTE_BASE_SPITFIRE), %g3
	or		%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx		%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	wrpr		%o1, 0x0, %pstate
	ldx		[%g6 + TI_TASK], %g4

	wrpr		%g0, 0, %wstate

	call		init_irqwork_curcpu
	 nop

	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
	ba,pt	%xcc, 2f
	 nop

1:	/* Start using proper page size encodings in ctx register.  */
	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
	mov	PRIMARY_CONTEXT, %g1
	sllx	%g3, 32, %g3
	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
	or	%g3, %g2, %g3
	stxa	%g3, [%g1] ASI_DMMU
	membar	#Sync

	/* Re-enable interrupts, install the kernel trap table in the
	 * PROM, then enter the scheduler.  smp_callin/cpu_idle never
	 * return; cpu_panic is a safety net.
	 */
2:
	rdpr		%pstate, %o1
	or		%o1, PSTATE_IE, %o1
	wrpr		%o1, 0, %pstate

	call		prom_set_trap_table
	 sethi		%hi(sparc64_ttable_tl0), %o0

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop
1:	b,a,pt		%xcc, 1b

	.align		8
sparc64_cpu_startup_end:
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
new file mode 100644
index 0000000..56b203a
--- /dev/null
+++ b/arch/sparc64/kernel/traps.c
@@ -0,0 +1,2118 @@
+/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/kernel/traps.c
+ *
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
+ */
+
+/*
+ * I like traps on v9, :))))
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>  /* for jiffies */
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/fpumacro.h>
+#include <asm/lsu.h>
+#include <asm/dcu.h>
+#include <asm/estate.h>
+#include <asm/chafsr.h>
+#include <asm/psrcompat.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+#include <asm/kdebug.h>
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
/* Notifier chain consulted by the trap handlers below (via notify_die());
 * a registered callback (e.g. a kernel debugger) can consume an event by
 * returning NOTIFY_STOP.
 */
struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

/* Add a callback to the sparc64 die notifier chain.
 *
 * The spinlock (irqs disabled) only serializes concurrent registrations.
 * Returns the notifier_chain_register() result (0 on success).
 */
int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;
	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
+
+/* When an irrecoverable trap occurs at tl > 0, the trap entry
+ * code logs the trap state registers at every level in the trap
+ * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
+ * is as follows:
+ */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate saved at this trap level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type */
	} trapstack[4];			/* one entry per hardware trap level */
	unsigned long tl;		/* trap level at the time of the error */
};
+
+static void dump_tl1_traplog(struct tl1_traplog *p)
+{
+	int i;
+
+	printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
+	       p->tl);
+	for (i = 0; i < 4; i++) {
+		printk(KERN_CRIT
+		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
+		       "TNPC[%016lx] TT[%lx]\n",
+		       i + 1,
+		       p->trapstack[i].tstate, p->trapstack[i].tpc,
+		       p->trapstack[i].tnpc, p->trapstack[i].tt);
+	}
+}
+
+void do_call_debug(struct pt_regs *regs) 
+{ 
+	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
+}
+
+void bad_trap(struct pt_regs *regs, long lvl)
+{
+	char buffer[32];
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "bad trap", regs,
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (lvl < 0x100) {
+		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
+		die_if_kernel(buffer, regs);
+	}
+
+	lvl -= 0x100;
+	if (regs->tstate & TSTATE_PRIV) {
+		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+		die_if_kernel(buffer, regs);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_ILLTRP;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = lvl;
+	force_sig_info(SIGILL, &info, current);
+}
+
+void bad_trap_tl1(struct pt_regs *regs, long lvl)
+{
+	char buffer[32];
+	
+	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+	die_if_kernel (buffer, regs);
+}
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
/* BUG() reporting hook: announce the failing file/line before the
 * illegal instruction that BUG() executes brings the kernel down.
 */
void do_BUG(const char *file, int line)
{
	/* Force console output through even if locks are held elsewhere. */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
+#endif
+
+void instruction_access_exception(struct pt_regs *regs,
+				  unsigned long sfsr, unsigned long sfar)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "instruction access exception", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
+		       sfsr, sfar);
+		die_if_kernel("Iax", regs);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_code = SEGV_MAPERR;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGSEGV, &info, current);
+}
+
+void instruction_access_exception_tl1(struct pt_regs *regs,
+				      unsigned long sfsr, unsigned long sfar)
+{
+	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	instruction_access_exception(regs, sfsr, sfar);
+}
+
/* Data access exception at TL 0.  Kernel-mode faults are first run
 * through the exception tables (uaccess fixups); an unfixable kernel
 * fault is fatal.  User faults become SIGSEGV/SEGV_MAPERR at the
 * faulting data address (SFAR), not the PC.
 */
void data_access_exception(struct pt_regs *regs,
			   unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Redirect execution to the fixup handler; %g2 may
			 * have been updated by the range-table lookup.
			 */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* Shit... */
		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
		       sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
+
+#ifdef CONFIG_PCI
+/* This is really pathetic... */
+extern volatile int pci_poke_in_progress;
+extern volatile int pci_poke_cpu;
+extern volatile int pci_poke_faulted;
+#endif
+
+/* When access exceptions happen, we must do this. */
/* Invalidate the L1 I-cache and D-cache tags (walking 2 * PAGE_SIZE
 * bytes of tag space in 32-byte steps), then re-enable both caches in
 * the LSU control register.  Spitfire-only; BUG() on other cpu types.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
+
+void do_iae(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	spitfire_clean_and_reenable_l1_caches();
+
+	if (notify_die(DIE_TRAP, "instruction access exception", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_OBJERR;
+	info.si_addr = (void *)0;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+void do_dae(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+#ifdef CONFIG_PCI
+	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		spitfire_clean_and_reenable_l1_caches();
+
+		pci_poke_faulted = 1;
+
+		/* Why the fuck did they have to change this? */
+		if (tlb_type == cheetah || tlb_type == cheetah_plus)
+			regs->tpc += 4;
+
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+	spitfire_clean_and_reenable_l1_caches();
+
+	if (notify_die(DIE_TRAP, "data access exception", regs,
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_OBJERR;
+	info.si_addr = (void *)0;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
/* Maps an 8-bit UDB ECC syndrome to the code that cee_log() below
 * hands to prom_getunumber() to name the failing memory module.
 * NOTE(review): values are hardware-defined and copied verbatim --
 * confirm against the UltraSPARC UDB ECC documentation, do not edit.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
+
+/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
+ * in the following format.  The AFAR is left as is, with
+ * reserved bits cleared, and is a raw 40-bit physical
+ * address.
+ */
+#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
+#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
+#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
+#define CE_STATUS_UDBH_SHIFT		43
+#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
+#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
+#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
+#define CE_STATUS_UDBL_SHIFT		33
+#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
+#define CE_STATUS_AFSR_ME		(1UL << 32)
+#define CE_STATUS_AFSR_PRIV		(1UL << 31)
+#define CE_STATUS_AFSR_ISAP		(1UL << 30)
+#define CE_STATUS_AFSR_ETP		(1UL << 29)
+#define CE_STATUS_AFSR_IVUE		(1UL << 28)
+#define CE_STATUS_AFSR_TO		(1UL << 27)
+#define CE_STATUS_AFSR_BERR		(1UL << 26)
+#define CE_STATUS_AFSR_LDP		(1UL << 25)
+#define CE_STATUS_AFSR_CP		(1UL << 24)
+#define CE_STATUS_AFSR_WP		(1UL << 23)
+#define CE_STATUS_AFSR_EDP		(1UL << 22)
+#define CE_STATUS_AFSR_UE		(1UL << 21)
+#define CE_STATUS_AFSR_CE		(1UL << 20)
+#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
+#define CE_STATUS_AFSR_ETS_SHIFT	16
+#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
+#define CE_STATUS_AFSR_PSYND_SHIFT	0
+
+/* Layout of Ecache TAG Parity Syndrome of AFSR */
+#define AFSR_ETSYNDROME_7_0		0x1UL /* E$-tag bus bits  <7:0> */
+#define AFSR_ETSYNDROME_15_8		0x2UL /* E$-tag bus bits <15:8> */
+#define AFSR_ETSYNDROME_21_16		0x4UL /* E$-tag bus bits <21:16> */
+#define AFSR_ETSYNDROME_24_22		0x8UL /* E$-tag bus bits <24:22> */
+
+static char *syndrome_unknown = "<Unknown>";
+
+asmlinkage void cee_log(unsigned long ce_status,
+			unsigned long afar,
+			struct pt_regs *regs)
+{
+	char memmod_str[64];
+	char *p;
+	unsigned short scode, udb_reg;
+
+	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
+	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
+	       smp_processor_id(),
+	       (ce_status & CE_STATUS_AFSR_MASK),
+	       afar,
+	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
+	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
+
+	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
+	if (udb_reg & (1 << 8)) {
+		scode = ecc_syndrome_table[udb_reg & 0xff];
+		if (prom_getunumber(scode, afar,
+				    memmod_str, sizeof(memmod_str)) == -1)
+			p = syndrome_unknown;
+		else
+			p = memmod_str;
+		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
+		       "Memory Module \"%s\"\n",
+		       smp_processor_id(), scode, p);
+	}
+
+	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
+	if (udb_reg & (1 << 8)) {
+		scode = ecc_syndrome_table[udb_reg & 0xff];
+		if (prom_getunumber(scode, afar,
+				    memmod_str, sizeof(memmod_str)) == -1)
+			p = syndrome_unknown;
+		else
+			p = memmod_str;
+		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
+		       "Memory Module \"%s\"\n",
+		       smp_processor_id(), scode, p);
+	}
+}
+
+/* Cheetah error trap handling. */
+static unsigned long ecache_flush_physbase;
+static unsigned long ecache_flush_linesize;
+static unsigned long ecache_flush_size;
+
+/* WARNING: The error trap handlers in assembly know the precise
+ *	    layout of the following structure.
+ *
+ * C-level handlers below use this information to log the error
+ * and then determine how to recover (if possible).
+ */
struct cheetah_err_info {
/*0x00*/u64 afsr;		/* CHAFSR_INVALID when slot is empty */
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];	/* pad to 32 u64's = 256 bytes total */
};
/* Sentinel stored in ->afsr when a log slot holds no valid entry. */
#define CHAFSR_INVALID		((u64)-1L)
+
+/* This table is ordered in priority of errors and matches the
+ * AFAR overwrite policy as well.
+ */
+
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying this error */
	const char *name;	/* human-readable description */
};
+
+static const char CHAFSR_PERR_msg[] =
+	"System interface protocol error";
+static const char CHAFSR_IERR_msg[] =
+	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
+static const char CHAFSR_UCU_msg[] =
+	"Uncorrectable E-cache ECC error for ifetch/data";
+static const char CHAFSR_UCC_msg[] =
+	"SW Correctable E-cache ECC error for ifetch/data";
+static const char CHAFSR_UE_msg[] =
+	"Uncorrectable system bus data ECC error for read";
+static const char CHAFSR_EDU_msg[] =
+	"Uncorrectable E-cache ECC error for stmerge/blkld";
+static const char CHAFSR_EMU_msg[] =
+	"Uncorrectable system bus MTAG error";
+static const char CHAFSR_WDU_msg[] =
+	"Uncorrectable E-cache ECC error for writeback";
+static const char CHAFSR_CPU_msg[] =
+	"Uncorrectable ECC error for copyout";
+static const char CHAFSR_CE_msg[] =
+	"HW corrected system bus data ECC error for read";
+static const char CHAFSR_EDC_msg[] =
+	"HW corrected E-cache ECC error for stmerge/blkld";
+static const char CHAFSR_EMC_msg[] =
+	"HW corrected system bus MTAG ECC error";
+static const char CHAFSR_WDC_msg[] =
+	"HW corrected E-cache ECC error for writeback";
+static const char CHAFSR_CPC_msg[] =
+	"HW corrected ECC error for copyout";
+static const char CHAFSR_TO_msg[] =
+	"Unmapped error from system bus";
+static const char CHAFSR_BERR_msg[] =
+	"Bus error response from system bus";
+static const char CHAFSR_IVC_msg[] =
+	"HW corrected system bus data ECC error for ivec read";
+static const char CHAFSR_IVU_msg[] =
+	"Uncorrectable system bus data ECC error for ivec read";
+static struct afsr_error_table __cheetah_error_table[] = {
+	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
+	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
+	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
+	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
+	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
+	{	CHAFSR_UE,	CHAFSR_UE_msg		},
+	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
+	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
+	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
+	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
+	{	CHAFSR_CE,	CHAFSR_CE_msg		},
+	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
+	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
+	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
+	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
+	{	CHAFSR_TO,	CHAFSR_TO_msg		},
+	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
+	/* These two do not update the AFAR. */
+	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
+	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
+	{	0,		NULL			},
+};
+static const char CHPAFSR_DTO_msg[] =
+	"System bus unmapped error for prefetch/storequeue-read";
+static const char CHPAFSR_DBERR_msg[] =
+	"System bus error for prefetch/storequeue-read";
+static const char CHPAFSR_THCE_msg[] =
+	"Hardware corrected E-cache Tag ECC error";
+static const char CHPAFSR_TSCE_msg[] =
+	"SW handled correctable E-cache Tag ECC error";
+static const char CHPAFSR_TUE_msg[] =
+	"Uncorrectable E-cache Tag ECC error";
+static const char CHPAFSR_DUE_msg[] =
+	"System bus uncorrectable data ECC error due to prefetch/store-fill";
+static struct afsr_error_table __cheetah_plus_error_table[] = {
+	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
+	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
+	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
+	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
+	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
+	{	CHAFSR_UE,	CHAFSR_UE_msg		},
+	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
+	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
+	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
+	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
+	{	CHAFSR_CE,	CHAFSR_CE_msg		},
+	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
+	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
+	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
+	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
+	{	CHAFSR_TO,	CHAFSR_TO_msg		},
+	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
+	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
+	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
+	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
+	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
+	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
+	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
+	/* These two do not update the AFAR. */
+	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
+	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
+	{	0,		NULL			},
+};
+static const char JPAFSR_JETO_msg[] =
+	"System interface protocol error, hw timeout caused";
+static const char JPAFSR_SCE_msg[] =
+	"Parity error on system snoop results";
+static const char JPAFSR_JEIC_msg[] =
+	"System interface protocol error, illegal command detected";
+static const char JPAFSR_JEIT_msg[] =
+	"System interface protocol error, illegal ADTYPE detected";
+static const char JPAFSR_OM_msg[] =
+	"Out of range memory error has occurred";
+static const char JPAFSR_ETP_msg[] =
+	"Parity error on L2 cache tag SRAM";
+static const char JPAFSR_UMS_msg[] =
+	"Error due to unsupported store";
+static const char JPAFSR_RUE_msg[] =
+	"Uncorrectable ECC error from remote cache/memory";
+static const char JPAFSR_RCE_msg[] =
+	"Correctable ECC error from remote cache/memory";
+static const char JPAFSR_BP_msg[] =
+	"JBUS parity error on returned read data";
+static const char JPAFSR_WBP_msg[] =
+	"JBUS parity error on data for writeback or block store";
+static const char JPAFSR_FRC_msg[] =
+	"Foreign read to DRAM incurring correctable ECC error";
+static const char JPAFSR_FRU_msg[] =
+	"Foreign read to DRAM incurring uncorrectable ECC error";
+static struct afsr_error_table __jalapeno_error_table[] = {
+	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
+	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
+	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
+	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
+	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
+	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
+	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
+	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
+	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
+	{	CHAFSR_UE,	CHAFSR_UE_msg		},
+	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
+	{	JPAFSR_OM,	JPAFSR_OM_msg		},
+	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
+	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
+	{	CHAFSR_CE,	CHAFSR_CE_msg		},
+	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
+	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
+	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
+	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
+	{	CHAFSR_TO,	CHAFSR_TO_msg		},
+	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
+	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
+	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
+	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
+	{	JPAFSR_BP,	JPAFSR_BP_msg		},
+	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
+	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
+	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
+	/* These two do not update the AFAR. */
+	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
+	{	0,		NULL			},
+};
+static struct afsr_error_table *cheetah_error_table;
+static unsigned long cheetah_afsr_errors;
+
+/* This is allocated at boot time based upon the largest hardware
+ * cpu ID in the system.  We allocate two entries per cpu, one for
+ * TL==0 logging and one for TL >= 1 logging.
+ */
+struct cheetah_err_info *cheetah_error_log;
+
+static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
+{
+	struct cheetah_err_info *p;
+	int cpu = smp_processor_id();
+
+	if (!cheetah_error_log)
+		return NULL;
+
+	p = cheetah_error_log + (cpu * 2);
+	if ((afsr & CHAFSR_TL1) != 0UL)
+		p++;
+
+	return p;
+}
+
+extern unsigned int tl0_icpe[], tl1_icpe[];
+extern unsigned int tl0_dcpe[], tl1_dcpe[];
+extern unsigned int tl0_fecc[], tl1_fecc[];
+extern unsigned int tl0_cee[], tl1_cee[];
+extern unsigned int tl0_iae[], tl1_iae[];
+extern unsigned int tl0_dae[], tl1_dae[];
+extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
+extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
+extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
+extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
+extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
+
+void __init cheetah_ecache_flush_init(void)
+{
+	unsigned long largest_size, smallest_linesize, order, ver;
+	int node, i, instance;
+
+	/* Scan all cpu device tree nodes, note two values:
+	 * 1) largest E-cache size
+	 * 2) smallest E-cache line size
+	 */
+	largest_size = 0UL;
+	smallest_linesize = ~0UL;
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, &node, NULL)) {
+		unsigned long val;
+
+		val = prom_getintdefault(node, "ecache-size",
+					 (2 * 1024 * 1024));
+		if (val > largest_size)
+			largest_size = val;
+		val = prom_getintdefault(node, "ecache-line-size", 64);
+		if (val < smallest_linesize)
+			smallest_linesize = val;
+		instance++;
+	}
+
+	if (largest_size == 0UL || smallest_linesize == ~0UL) {
+		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
+			    "parameters.\n");
+		prom_halt();
+	}
+
+	ecache_flush_size = (2 * largest_size);
+	ecache_flush_linesize = smallest_linesize;
+
+	/* Discover a physically contiguous chunk of physical
+	 * memory in 'sp_banks' of size ecache_flush_size calculated
+	 * above.  Store the physical base of this area at
+	 * ecache_flush_physbase.
+	 */
+	for (node = 0; ; node++) {
+		if (sp_banks[node].num_bytes == 0)
+			break;
+		if (sp_banks[node].num_bytes >= ecache_flush_size) {
+			ecache_flush_physbase = sp_banks[node].base_addr;
+			break;
+		}
+	}
+
+	/* Note: Zero would be a valid value of ecache_flush_physbase so
+	 * don't use that as the success test. :-)
+	 */
+	if (sp_banks[node].num_bytes == 0) {
+		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
+			    "contiguous physical memory.\n", ecache_flush_size);
+		prom_halt();
+	}
+
+	/* Now allocate error trap reporting scoreboard. */
+	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+	for (order = 0; order < MAX_ORDER; order++) {
+		if ((PAGE_SIZE << order) >= node)
+			break;
+	}
+	cheetah_error_log = (struct cheetah_err_info *)
+		__get_free_pages(GFP_KERNEL, order);
+	if (!cheetah_error_log) {
+		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
+			    "error logging scoreboard (%d bytes).\n", node);
+		prom_halt();
+	}
+	memset(cheetah_error_log, 0, PAGE_SIZE << order);
+
+	/* Mark all AFSRs as invalid so that the trap handler will
+	 * log new new information there.
+	 */
+	for (i = 0; i < 2 * NR_CPUS; i++)
+		cheetah_error_log[i].afsr = CHAFSR_INVALID;
+
+	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+	if ((ver >> 32) == 0x003e0016) {
+		cheetah_error_table = &__jalapeno_error_table[0];
+		cheetah_afsr_errors = JPAFSR_ERRORS;
+	} else if ((ver >> 32) == 0x003e0015) {
+		cheetah_error_table = &__cheetah_plus_error_table[0];
+		cheetah_afsr_errors = CHPAFSR_ERRORS;
+	} else {
+		cheetah_error_table = &__cheetah_error_table[0];
+		cheetah_afsr_errors = CHAFSR_ERRORS;
+	}
+
+	/* Now patch trap tables. */
+	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
+	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
+	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
+	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
+	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
+	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
+	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
+	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
+	if (tlb_type == cheetah_plus) {
+		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
+		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
+		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
+		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
+	}
+	flushi(PAGE_OFFSET);
+}
+
/* Displace the entire E-cache by reading ecache_flush_size bytes
 * (twice the largest E-cache, set up in cheetah_ecache_flush_init)
 * of physically contiguous memory through ASI_PHYS_USE_EC, one
 * line at a time, walking downward from the top of the area.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loaded values are discarded (%g0 destination); the loads only
	 * exist to evict whatever the E-cache currently holds.
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
+
/* Flush the single E-cache line holding 'physaddr' by loading the two
 * addresses inside the flush area that map to the same line (the area
 * spans twice the E-cache size, hence the half-size modulus + alias).
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* Align down to the 8-byte load granularity. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
+
+/* Unfortunately, the diagnostic access to the I-cache tags we need to
+ * use to clear the thing interferes with I-cache coherency transactions.
+ *
+ * So we must only flush the I-cache when it is disabled.
+ */
/* Core of the I-cache flush: must only run while the I-cache is
 * disabled (see comment above and cheetah_flush_icache()).
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags.  32K of tag space is
	 * walked in 32-byte steps.  NOTE(review): (2 << 3) appears to
	 * select the tag field of the diagnostic access encoding --
	 * confirm against the UltraSPARC-III manual.
	 */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}
+
/* Safe I-cache flush: toggle the I-cache off in the DCU control
 * register, zap the tags, then restore the previous DCU value.
 * NOTE(review): the 'or' with DCU_IC sets that bit; per the inline
 * comment this is meant to disable the I-cache -- confirm the DCU_IC
 * polarity against the processor manual.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
+
/* Invalidate every D-cache tag via diagnostic ASI stores: 64K of tag
 * space walked in 32-byte steps.
 */
static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}
+
+/* In order to make the even parity correct we must do two things.
+ * First, we clear DC_data_parity and set DC_utag to an appropriate value.
+ * Next, we clear out all 32-bytes of data for that line.  Data of
+ * all-zero + tag parity value of zero == correct parity.
+ */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	/* Walk all D-cache lines (64K in 32-byte steps). */
	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		/* utag value written for this line, derived from its index. */
		unsigned long tag = (i >> 14);
		unsigned long j;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		/* Zero all 32 data bytes of the line, 8 bytes at a time;
		 * all-zero data + zero tag parity == correct even parity
		 * (see the comment above this function).
		 */
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
+
+/* Conversion tables used to frob Cheetah AFSR syndrome values into
+ * something palatable to the memory controller driver get_unumber
+ * routine.
+ */
+#define MT0	137
+#define MT1	138
+#define MT2	139
+#define NONE	254
+#define MTC0	140
+#define MTC1	141
+#define MTC2	142
+#define MTC3	143
+#define C0	128
+#define C1	129
+#define C2	130
+#define C3	131
+#define C4	132
+#define C5	133
+#define C6	134
+#define C7	135
+#define C8	136
+#define M2	144
+#define M3	145
+#define M4	146
+#define M	147
+static unsigned char cheetah_ecc_syntab[] = {
+/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
+/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
+/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
+/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
+/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
+/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
+/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
+/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
+/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
+/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
+/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
+/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
+/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
+/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
+/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
+/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
+/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
+/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
+/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
+/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
+/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
+/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
+/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
+/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
+/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
+/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
+/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
+/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
+/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
+/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
+/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
+/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
+};
/* Maps a 4-bit MTAG ECC syndrome to a code for chmc_getunumber()
 * (used by cheetah_log_errors() for the MSYND error class); syndromes
 * with no defined decode map to NONE.
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
+
+/* Return the highest priority error conditon mentioned. */
+static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
+{
+	unsigned long tmp = 0;
+	int i;
+
+	for (i = 0; cheetah_error_table[i].mask; i++) {
+		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
+			return tmp;
+	}
+	return tmp;
+}
+
+static const char *cheetah_get_string(unsigned long bit)
+{
+	int i;
+
+	for (i = 0; cheetah_error_table[i].mask; i++) {
+		if ((bit & cheetah_error_table[i].mask) != 0UL)
+			return cheetah_error_table[i].name;
+	}
+	return "???";
+}
+
+extern int chmc_getunumber(int, unsigned long, char *, int);
+
+/* Pretty-print everything known about a Cheetah error trap:
+ * AFSR/AFAR, the trap registers, M/E syndromes (with DIMM "unum"
+ * lookup via chmc_getunumber() when the error type carries a valid
+ * syndrome), the D/I/E-cache snapshots captured at trap time, and
+ * finally every secondary error bit beyond the highest-priority one.
+ *
+ * All messages go out at KERN_WARNING when the caller judged the
+ * error recoverable, KERN_CRIT otherwise.
+ */
+static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
+			       unsigned long afsr, unsigned long afar, int recoverable)
+{
+	unsigned long hipri;
+	char unum[256];
+
+	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       afsr, afar,
+	       (afsr & CHAFSR_TL1) ? 1 : 0);
+	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       regs->tpc, regs->tnpc, regs->tstate);
+	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
+	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
+	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
+	hipri = cheetah_get_hipri(afsr);
+	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       hipri, cheetah_get_string(hipri));
+
+	/* Try to get unumber if relevant. */
+#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
+			 CHAFSR_CPC | CHAFSR_CPU | \
+			 CHAFSR_UE  | CHAFSR_CE  | \
+			 CHAFSR_EDC | CHAFSR_EDU  | \
+			 CHAFSR_UCC | CHAFSR_UCU  | \
+			 CHAFSR_WDU | CHAFSR_WDC)
+#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
+	if (afsr & ESYND_ERRORS) {
+		int syndrome;
+		int ret;
+
+		/* E-syndrome errors: decode through the ECC syndrome table. */
+		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
+		syndrome = cheetah_ecc_syntab[syndrome];
+		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+		if (ret != -1)
+			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
+			       (recoverable ? KERN_WARNING : KERN_CRIT),
+			       smp_processor_id(), unum);
+	} else if (afsr & MSYND_ERRORS) {
+		int syndrome;
+		int ret;
+
+		/* M-syndrome errors: decode through the mtag syndrome table. */
+		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
+		syndrome = cheetah_mtag_syntab[syndrome];
+		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+		if (ret != -1)
+			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
+			       (recoverable ? KERN_WARNING : KERN_CRIT),
+			       smp_processor_id(), unum);
+	}
+
+	/* Now dump the cache snapshots. */
+	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->dcache_index,
+	       info->dcache_tag,
+	       info->dcache_utag,
+	       info->dcache_stag);
+	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->dcache_data[0],
+	       info->dcache_data[1],
+	       info->dcache_data[2],
+	       info->dcache_data[3]);
+	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
+	       "u[%016lx] l[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->icache_index,
+	       info->icache_tag,
+	       info->icache_utag,
+	       info->icache_stag,
+	       info->icache_upper,
+	       info->icache_lower);
+	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->icache_data[0],
+	       info->icache_data[1],
+	       info->icache_data[2],
+	       info->icache_data[3]);
+	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->icache_data[4],
+	       info->icache_data[5],
+	       info->icache_data[6],
+	       info->icache_data[7]);
+	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->ecache_index, info->ecache_tag);
+	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->ecache_data[0],
+	       info->ecache_data[1],
+	       info->ecache_data[2],
+	       info->ecache_data[3]);
+
+	/* Report the remaining (lower-priority) error bits, one per line. */
+	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
+	while (afsr != 0UL) {
+		unsigned long bit = cheetah_get_hipri(afsr);
+
+		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
+		       (recoverable ? KERN_WARNING : KERN_CRIT),
+		       bit, cheetah_get_string(bit));
+
+		afsr &= ~bit;
+	}
+
+	if (!recoverable)
+		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
+}
+
+/* Re-read the AFSR (and AFAR, when LOGP is non-NULL) to see whether
+ * any new error bits have been latched, then write the observed AFSR
+ * value back -- NOTE(review): presumably write-one-to-clear
+ * semantics to acknowledge the bits, which is how every caller uses
+ * this; confirm against the UltraSPARC-III manual.
+ *
+ * Returns 1 if any bit in cheetah_afsr_errors was set, else 0.
+ */
+static int cheetah_recheck_errors(struct cheetah_err_info *logp)
+{
+	unsigned long afsr, afar;
+	int ret = 0;
+
+	/* Read the current AFSR contents. */
+	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
+			     : "=r" (afsr)
+			     : "i" (ASI_AFSR));
+	if ((afsr & cheetah_afsr_errors) != 0) {
+		if (logp != NULL) {
+			/* Capture AFAR too so the caller can log it. */
+			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
+					     : "=r" (afar)
+					     : "i" (ASI_AFAR));
+			logp->afsr = afsr;
+			logp->afar = afar;
+		}
+		ret = 1;
+	}
+	/* Write the observed bits back to AFSR and synchronize. */
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync\n\t"
+			     : : "r" (afsr), "i" (ASI_AFSR));
+
+	return ret;
+}
+
+/* Fast-ECC error trap handler.  Flushes the E-cache, snapshots the
+ * logged error, flushes and re-enables the I/D caches, restores
+ * error reporting (NCEEN|CEEN), then decides recoverability and
+ * logs.  If no per-cpu error log entry exists yet (very early trap)
+ * we report via the PROM and halt.
+ */
+void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable;
+
+	/* Flush E-cache */
+	cheetah_flush_ecache();
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	cheetah_flush_icache();
+	cheetah_flush_dcache();
+
+	/* Re-enable I-cache/D-cache */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_DCU_CONTROL_REG),
+			       "i" (DCU_DC | DCU_IC)
+			     : "g1");
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
+	 * error was logged while we had error reporting traps disabled.
+	 */
+	if (cheetah_recheck_errors(&local_snapshot)) {
+		unsigned long new_afsr = local_snapshot.afsr;
+
+		/* If we got a new asynchronous error, die... */
+		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
+				CHAFSR_WDU | CHAFSR_CPU |
+				CHAFSR_IVU | CHAFSR_UE |
+				CHAFSR_BERR | CHAFSR_TO))
+			recoverable = 0;
+	}
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	if (!recoverable)
+		panic("Irrecoverable Fast-ECC error trap.\n");
+
+	/* Flush E-cache to kick the error trap handlers out. */
+	cheetah_flush_ecache();
+}
+
+/* Try to fix a correctable error by pushing the line out from
+ * the E-cache.  Recheck error reporting registers to see if the
+ * problem is intermittent.
+ *
+ * Return values:
+ *   0 - no new error latched by the displacement (intermittent)
+ *   1 - the error re-triggered, but a second displacement flush
+ *       left the error registers clean
+ *   2 - the error was still present after the retry
+ */
+static int cheetah_fix_ce(unsigned long physaddr)
+{
+	unsigned long orig_estate;
+	unsigned long alias1, alias2;
+	int ret;
+
+	/* Make sure correctable error traps are disabled. */
+	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
+			     "andn	%0, %1, %%g1\n\t"
+			     "stxa	%%g1, [%%g0] %2\n\t"
+			     "membar	#Sync"
+			     : "=&r" (orig_estate)
+			     : "i" (ESTATE_ERROR_CEEN),
+			       "i" (ASI_ESTATE_ERROR_EN)
+			     : "g1");
+
+	/* We calculate alias addresses that will force the
+	 * cache line in question out of the E-cache.  Then
+	 * we bring it back in with an atomic instruction so
+	 * that we get it in some modified/exclusive state,
+	 * then we displace it again to try and get proper ECC
+	 * pushed back into the system.
+	 */
+	/* Align down to the 8-byte word containing the error. */
+	physaddr &= ~(8UL - 1UL);
+	alias1 = (ecache_flush_physbase +
+		  (physaddr & ((ecache_flush_size >> 1) - 1)));
+	alias2 = alias1 + (ecache_flush_size >> 1);
+	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
+			     "ldxa	[%1] %3, %%g0\n\t"
+			     "casxa	[%2] %3, %%g0, %%g0\n\t"
+			     "membar	#StoreLoad | #StoreStore\n\t"
+			     "ldxa	[%0] %3, %%g0\n\t"
+			     "ldxa	[%1] %3, %%g0\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (alias1), "r" (alias2),
+			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));
+
+	/* Did that trigger another error? */
+	if (cheetah_recheck_errors(NULL)) {
+		/* Try one more time. */
+		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
+				     "membar #Sync"
+				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
+		if (cheetah_recheck_errors(NULL))
+			ret = 2;
+		else
+			ret = 1;
+	} else {
+		/* No new error, intermittent problem. */
+		ret = 0;
+	}
+
+	/* Restore error enables. */
+	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
+			     "membar	#Sync"
+			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
+
+	return ret;
+}
+
+/* Return non-zero if PADDR lies within one of the probed main
+ * memory banks; sp_banks[] is terminated by an entry with
+ * num_bytes == 0.
+ */
+static int cheetah_check_main_memory(unsigned long paddr)
+{
+	int i;
+
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		unsigned long start = sp_banks[i].base_addr;
+		unsigned long end = start + sp_banks[i].num_bytes;
+
+		if (paddr >= start && paddr < end)
+			return 1;
+	}
+	return 0;
+}
+
+/* Correctable ECC error (CEE) trap handler.  For CE errors in main
+ * memory we try to scrub the line via cheetah_fix_ce().  EDC/CPC
+ * errors get the E-cache flushed -- just the offending line when
+ * that bit is the only error present, the whole cache otherwise.
+ * The I-cache is flushed and re-enabled, CEEN restored, then the
+ * error is logged.
+ */
+void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable, is_memory;
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	is_memory = cheetah_check_main_memory(afar);
+
+	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
+		/* XXX Might want to log the results of this operation
+		 * XXX somewhere... -DaveM
+		 */
+		cheetah_fix_ce(afar);
+	}
+
+	{
+		int flush_all, flush_line;
+
+		/* Flush only the affected line when the EDC/CPC bit is
+		 * the sole error present, otherwise flush everything.
+		 */
+		flush_all = flush_line = 0;
+		if ((afsr & CHAFSR_EDC) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		} else if ((afsr & CHAFSR_CPC) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		}
+
+		/* Trap handler only disabled I-cache, flush it. */
+		cheetah_flush_icache();
+
+		/* Re-enable I-cache */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				     "i" (DCU_IC)
+				     : "g1");
+
+		if (flush_all)
+			cheetah_flush_ecache();
+		else if (flush_line)
+			cheetah_flush_ecache_line(afar);
+	}
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			       "i" (ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR */
+	(void) cheetah_recheck_errors(&local_snapshot);
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	if (!recoverable)
+		panic("Irrecoverable Correctable-ECC error trap.\n");
+}
+
+/* Deferred (asynchronous) error trap handler.  Also implements the
+ * PCI config-space "poke" protocol: if a poke was in progress on
+ * this cpu we just re-enable caches/error reporting, ack the error
+ * state, flag the fault and skip the faulting instruction.
+ * Otherwise we flush and re-enable the caches, restore error
+ * reporting, log the error, and only continue when the damage can
+ * be contained to a userspace page (see the recovery comment near
+ * the bottom).
+ */
+void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable, is_memory;
+
+#ifdef CONFIG_PCI
+	/* Check for the special PCI poke sequence. */
+	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		cheetah_flush_icache();
+		cheetah_flush_dcache();
+
+		/* Re-enable I-cache/D-cache */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				       "i" (DCU_DC | DCU_IC)
+				     : "g1");
+
+		/* Re-enable error reporting */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_ESTATE_ERROR_EN),
+				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+				     : "g1");
+
+		/* Ack the latched error state, then report the fault to
+		 * the poker and step past the faulting instruction.
+		 */
+		(void) cheetah_recheck_errors(NULL);
+
+		pci_poke_faulted = 1;
+		regs->tpc += 4;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	is_memory = cheetah_check_main_memory(afar);
+
+	{
+		int flush_all, flush_line;
+
+		/* Flush only the affected E-cache line when EDU/BERR is
+		 * the sole error present, otherwise flush everything.
+		 */
+		flush_all = flush_line = 0;
+		if ((afsr & CHAFSR_EDU) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		} else if ((afsr & CHAFSR_BERR) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		}
+
+		cheetah_flush_icache();
+		cheetah_flush_dcache();
+
+		/* Re-enable I/D caches */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				     "i" (DCU_IC | DCU_DC)
+				     : "g1");
+
+		if (flush_all)
+			cheetah_flush_ecache();
+		else if (flush_line)
+			cheetah_flush_ecache_line(afar);
+	}
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
+	 * error was logged while we had error reporting traps disabled.
+	 */
+	if (cheetah_recheck_errors(&local_snapshot)) {
+		unsigned long new_afsr = local_snapshot.afsr;
+
+		/* If we got a new asynchronous error, die... */
+		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
+				CHAFSR_WDU | CHAFSR_CPU |
+				CHAFSR_IVU | CHAFSR_UE |
+				CHAFSR_BERR | CHAFSR_TO))
+			recoverable = 0;
+	}
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	/* "Recoverable" here means we try to yank the page from ever
+	 * being newly used again.  This depends upon a few things:
+	 * 1) Must be main memory, and AFAR must be valid.
+	 * 2) If we trapped from user, OK.
+	 * 3) Else, if we trapped from kernel we must find exception
+	 *    table entry (ie. we have to have been accessing user
+	 *    space).
+	 *
+	 * If AFAR is not in main memory, or we trapped from kernel
+	 * and cannot find an exception table entry, it is unacceptable
+	 * to try and continue.
+	 */
+	if (recoverable && is_memory) {
+		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
+			/* OK, usermode access. */
+			recoverable = 1;
+		} else {
+			unsigned long g2 = regs->u_regs[UREG_G2];
+			unsigned long fixup = search_extables_range(regs->tpc, &g2);
+
+			if (fixup != 0UL) {
+				/* OK, kernel access to userspace. */
+				recoverable = 1;
+
+			} else {
+				/* BAD, privileged state is corrupted. */
+				recoverable = 0;
+			}
+
+			if (recoverable) {
+				/* Pin the bad page (get_page) so it is never
+				 * handed out again, then redirect execution
+				 * to the exception fixup.
+				 */
+				if (pfn_valid(afar >> PAGE_SHIFT))
+					get_page(pfn_to_page(afar >> PAGE_SHIFT));
+				else
+					recoverable = 0;
+
+				/* Only perform fixup if we still have a
+				 * recoverable condition.
+				 */
+				if (recoverable) {
+					regs->tpc = fixup;
+					regs->tnpc = regs->tpc + 4;
+					regs->u_regs[UREG_G2] = g2;
+				}
+			}
+		}
+	} else {
+		recoverable = 0;
+	}
+
+	if (!recoverable)
+		panic("Irrecoverable deferred error trap.\n");
+}
+
+/* Handle a D/I cache parity error trap.  TYPE is encoded as:
+ *
+ * Bit0:	0=dcache,1=icache
+ * Bit1:	0=recoverable,1=unrecoverable
+ *
+ * The hardware has disabled both the I-cache and D-cache in
+ * the %dcr register.  
+ */
+void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+{
+	/* I-cache errors get a full I-cache flush; D-cache errors have
+	 * their stored parity zapped first.  The D-cache is flushed in
+	 * both cases.
+	 */
+	if (type & 0x1)
+		__cheetah_flush_icache();
+	else
+		cheetah_plus_zap_dcache_parity();
+	cheetah_flush_dcache();
+
+	/* Re-enable I-cache/D-cache */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_DCU_CONTROL_REG),
+			       "i" (DCU_DC | DCU_IC)
+			     : "g1");
+
+	/* Unrecoverable variant: report and panic. */
+	if (type & 0x2) {
+		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+		       smp_processor_id(),
+		       (type & 0x1) ? 'I' : 'D',
+		       regs->tpc);
+		panic("Irrecoverable Cheetah+ parity error.");
+	}
+
+	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+	       smp_processor_id(),
+	       (type & 0x1) ? 'I' : 'D',
+	       regs->tpc);
+}
+
+/* Common handling for FPU exception traps.  Privileged-mode traps
+ * just step past the faulting instruction; user traps deliver
+ * SIGFPE with si_code derived from the saved FSR.
+ */
+void do_fpe_common(struct pt_regs *regs)
+{
+	if (regs->tstate & TSTATE_PRIV) {
+		/* Kernel-mode FP trap: skip the instruction. */
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	} else {
+		unsigned long fsr = current_thread_info()->xfsr[0];
+		siginfo_t info;
+
+		if (test_thread_flag(TIF_32BIT)) {
+			regs->tpc &= 0xffffffff;
+			regs->tnpc &= 0xffffffff;
+		}
+		info.si_signo = SIGFPE;
+		info.si_errno = 0;
+		info.si_addr = (void __user *)regs->tpc;
+		info.si_trapno = 0;
+		info.si_code = __SI_FAULT;
+		/* 0x1c000 masks the fsr.ftt field; (1 << 14) is ftt == 1
+		 * (NOTE(review): an IEEE 754 exception per the SPARC V9
+		 * FSR layout -- confirm).  The low bits then select the
+		 * specific si_code: 0x10 invalid, 0x08 overflow, 0x04
+		 * underflow, 0x02 divide-by-zero, 0x01 inexact.
+		 */
+		if ((fsr & 0x1c000) == (1 << 14)) {
+			if (fsr & 0x10)
+				info.si_code = FPE_FLTINV;
+			else if (fsr & 0x08)
+				info.si_code = FPE_FLTOVF;
+			else if (fsr & 0x04)
+				info.si_code = FPE_FLTUND;
+			else if (fsr & 0x02)
+				info.si_code = FPE_FLTDIV;
+			else if (fsr & 0x01)
+				info.si_code = FPE_FLTRES;
+		}
+		force_sig_info(SIGFPE, &info, current);
+	}
+}
+
+/* FP exception (IEEE 754) trap, type 0x24.  Offer the event to the
+ * die notifier chain first; fall through to common FPE handling
+ * unless a notifier consumed it.
+ */
+void do_fpieee(struct pt_regs *regs)
+{
+	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
+		       0, 0x24, SIGFPE) != NOTIFY_STOP)
+		do_fpe_common(regs);
+}
+
+extern int do_mathemu(struct pt_regs *, struct fpustate *);
+
+/* "FP exception other" trap, type 0x25.  Unfinished and
+ * unimplemented FPops are handed to the software emulator; anything
+ * it does not resolve falls through to common FPE handling.
+ */
+void do_fpother(struct pt_regs *regs)
+{
+	struct fpustate *f = FPUSTATE;
+	unsigned long ftt;
+	int handled = 0;
+
+	if (notify_die(DIE_TRAP, "fpu exception other", regs,
+		       0, 0x25, SIGFPE) == NOTIFY_STOP)
+		return;
+
+	/* 0x1c000 masks the fsr.ftt trap-type field. */
+	ftt = current_thread_info()->xfsr[0] & 0x1c000;
+	if (ftt == (2 << 14) ||		/* unfinished_FPop */
+	    ftt == (3 << 14))		/* unimplemented_FPop */
+		handled = do_mathemu(regs, f);
+
+	if (!handled)
+		do_fpe_common(regs);
+}
+
+/* Tagged arithmetic overflow trap, type 0x26.  Fatal in kernel
+ * mode; user processes receive SIGEMT with EMT_TAGOVF.
+ */
+void do_tof(struct pt_regs *regs)
+{
+	siginfo_t sig_data;
+
+	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
+		       0, 0x26, SIGEMT) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV)
+		die_if_kernel("Penguin overflow trap from kernel mode", regs);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	sig_data.si_signo = SIGEMT;
+	sig_data.si_errno = 0;
+	sig_data.si_trapno = 0;
+	sig_data.si_addr = (void __user *)regs->tpc;
+	sig_data.si_code = EMT_TAGOVF;
+	force_sig_info(SIGEMT, &sig_data, current);
+}
+
+/* Integer divide-by-zero trap, type 0x28.  Fatal in kernel mode;
+ * user processes receive SIGFPE with FPE_INTDIV.
+ */
+void do_div0(struct pt_regs *regs)
+{
+	siginfo_t sig_data;
+
+	if (notify_die(DIE_TRAP, "integer division by zero", regs,
+		       0, 0x28, SIGFPE) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV)
+		die_if_kernel("TL0: Kernel divide by zero.", regs);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	sig_data.si_signo = SIGFPE;
+	sig_data.si_errno = 0;
+	sig_data.si_trapno = 0;
+	sig_data.si_addr = (void __user *)regs->tpc;
+	sig_data.si_code = FPE_INTDIV;
+	force_sig_info(SIGFPE, &sig_data, current);
+}
+
+/* Print the nine kernel instruction words surrounding PC
+ * (pc[-3] .. pc[5]), bracketing the word at PC itself with '<' '>'.
+ * Misaligned PCs are silently ignored.
+ */
+void instruction_dump (unsigned int *pc)
+{
+	int i;
+
+	if (((unsigned long) pc) & 3)
+		return;
+
+	printk("Instruction DUMP:");
+	for (i = -3; i < 6; i++) {
+		int at_pc = (i == 0);
+
+		printk("%c%08x%c",
+		       at_pc ? '<' : ' ', pc[i], at_pc ? '>' : ' ');
+	}
+	printk("\n");
+}
+
+/* Print the nine user instruction words surrounding the faulting PC
+ * (pc[-3] .. pc[5]), bracketing the word at PC itself (buf[3]) with
+ * '<' '>' -- the same convention instruction_dump() uses for kernel
+ * text.  Misaligned PCs and unreadable user memory are silently
+ * ignored.
+ *
+ * Fix: the bracket ternaries were inverted (i==3?' ':'<'), so every
+ * word EXCEPT the faulting one was printed inside '<...>'.
+ */
+static void user_instruction_dump (unsigned int __user *pc)
+{
+	int i;
+	unsigned int buf[9];
+	
+	if ((((unsigned long) pc) & 3))
+		return;
+		
+	if (copy_from_user(buf, pc - 3, sizeof(buf)))
+		return;
+
+	printk("Instruction DUMP:");
+	for (i = 0; i < 9; i++)
+		printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');
+	printk("\n");
+}
+
+/* Print a call trace for TSK starting at kernel stack pointer _KSP.
+ * Walks the chain of saved register windows on the kernel stack:
+ * ins[7] (%i7) holds the caller's PC, ins[6] (%i6) the previous
+ * frame pointer.  Stops after 16 frames or when the frame pointer
+ * leaves the task's thread area.
+ */
+void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+{
+	unsigned long pc, fp, thread_base, ksp;
+	struct thread_info *tp = tsk->thread_info;
+	struct reg_window *rw;
+	int count = 0;
+
+	ksp = (unsigned long) _ksp;
+
+	/* For the current task, push the live register windows out to
+	 * the stack first so the frames below are up to date.
+	 */
+	if (tp == current_thread_info())
+		flushw_all();
+
+	fp = ksp + STACK_BIAS;
+	thread_base = (unsigned long) tp;
+
+	printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+	do {
+		/* Bogus frame pointer? */
+		if (fp < (thread_base + sizeof(struct thread_info)) ||
+		    fp >= (thread_base + THREAD_SIZE))
+			break;
+		rw = (struct reg_window *)fp;
+		pc = rw->ins[7];
+		printk(" [%016lx] ", pc);
+		print_symbol("%s\n", pc);
+		fp = rw->ins[6] + STACK_BIAS;
+	} while (++count < 16);
+#ifndef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+}
+
+/* Dump the current task's kernel stack trace: read the live frame
+ * pointer (%fp) and hand it to show_stack().
+ */
+void dump_stack(void)
+{
+	unsigned long *ksp;
+
+	__asm__ __volatile__("mov	%%fp, %0"
+			     : "=r" (ksp));
+	show_stack(current, ksp);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+/* Return 1 when RW points at a plausible saved register window on
+ * TASK's kernel stack: inside the task's thread union and 8-byte
+ * aligned.  Addresses below PAGE_OFFSET are only considered for
+ * init_task.
+ */
+static inline int is_kernel_stack(struct task_struct *task,
+				  struct reg_window *rw)
+{
+	unsigned long addr = (unsigned long) rw;
+	unsigned long stack_base, stack_end;
+
+	if (addr < PAGE_OFFSET && task != &init_task)
+		return 0;
+
+	stack_base = (unsigned long) task->thread_info;
+	stack_end = stack_base + sizeof(union thread_union);
+
+	return (addr >= stack_base &&
+		addr < stack_end &&
+		!(addr & 0x7UL)) ? 1 : 0;
+}
+
+/* Follow the saved frame pointer (%i6) up to the caller's register
+ * window; NULL terminates the walk.
+ */
+static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+{
+	unsigned long fp = rw->ins[6];
+
+	return fp ? (struct reg_window *) (fp + STACK_BIAS) : NULL;
+}
+
+/* Report a fatal trap: print an oops banner, the registers, a
+ * bounded kernel backtrace (or a user instruction dump), then exit
+ * the task.  Despite the name, this never returns -- user traps end
+ * in do_exit(SIGSEGV), kernel traps in do_exit(SIGKILL).
+ */
+void die_if_kernel(char *str, struct pt_regs *regs)
+{
+	static int die_counter;
+	extern void __show_regs(struct pt_regs * regs);
+	extern void smp_report_regs(void);
+	int count = 0;
+	
+	/* Amuse the user. */
+	printk(
+"              \\|/ ____ \\|/\n"
+"              \"@'/ .. \\`@\"\n"
+"              /_| \\__/ |_\\\n"
+"                 \\__U_/\n");
+
+	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
+	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
+	/* Push register windows to the stack so the trace is complete. */
+	__asm__ __volatile__("flushw");
+	__show_regs(regs);
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *rw = (struct reg_window *)
+			(regs->u_regs[UREG_FP] + STACK_BIAS);
+
+		/* Stop the back trace when we hit userland or we
+		 * find some badly aligned kernel stack.
+		 */
+		while (rw &&
+		       count++ < 30&&
+		       is_kernel_stack(current, rw)) {
+			printk("Caller[%016lx]", rw->ins[7]);
+			print_symbol(": %s", rw->ins[7]);
+			printk("\n");
+
+			rw = kernel_stack_up(rw);
+		}
+		instruction_dump ((unsigned int *) regs->tpc);
+	} else {
+		if (test_thread_flag(TIF_32BIT)) {
+			regs->tpc &= 0xffffffff;
+			regs->tnpc &= 0xffffffff;
+		}
+		user_instruction_dump ((unsigned int __user *) regs->tpc);
+	}
+#ifdef CONFIG_SMP
+	smp_report_regs();
+#endif
+
+	if (regs->tstate & TSTATE_PRIV)
+		do_exit(SIGKILL);
+	do_exit(SIGSEGV);
+}
+
+extern int handle_popc(u32 insn, struct pt_regs *regs);
+extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+
+/* Illegal instruction trap, type 0x10.  Instructions matching the
+ * POPC or LDQ/STQ opcode patterns are handed to their emulation
+ * helpers; anything unhandled gets SIGILL/ILL_ILLOPC.  Kernel-mode
+ * illegal instructions are fatal.
+ */
+void do_illegal_instruction(struct pt_regs *regs)
+{
+	unsigned long pc = regs->tpc;
+	unsigned long tstate = regs->tstate;
+	u32 insn;
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "illegal instruction", regs,
+		       0, 0x10, SIGILL) == NOTIFY_STOP)
+		return;
+
+	if (tstate & TSTATE_PRIV)
+		die_if_kernel("Kernel illegal instruction", regs);
+	/* Compat tasks only have a 32-bit PC. */
+	if (test_thread_flag(TIF_32BIT))
+		pc = (u32)pc;
+	/* get_user() yields 0 on success or -EFAULT here, so this
+	 * condition means "the instruction fetch succeeded".
+	 */
+	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
+			if (handle_popc(insn, regs))
+				return;
+		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
+			if (handle_ldf_stq(insn, regs))
+				return;
+		}
+	}
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_ILLOPC;
+	info.si_addr = (void __user *)pc;
+	info.si_trapno = 0;
+	force_sig_info(SIGILL, &info, current);
+}
+
+/* Memory-address-not-aligned trap, type 0x34.  Kernel-mode accesses
+ * are forwarded to the unaligned-access handler together with the
+ * faulting instruction word; user accesses get SIGBUS/BUS_ADRALN at
+ * the fault address (sfar).
+ */
+void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
+		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		extern void kernel_unaligned_trap(struct pt_regs *regs,
+						  unsigned int insn, 
+						  unsigned long sfar,
+						  unsigned long sfsr);
+
+		/* Fetch the faulting kernel instruction directly. */
+		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
+				      sfar, sfsr);
+		return;
+	}
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRALN;
+	info.si_addr = (void __user *)sfar;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+/* Privileged opcode trap, type 0x11: a user process executed a
+ * privileged instruction.  Delivers SIGILL with ILL_PRVOPC.
+ */
+void do_privop(struct pt_regs *regs)
+{
+	siginfo_t sig_data;
+
+	if (notify_die(DIE_TRAP, "privileged operation", regs,
+		       0, 0x11, SIGILL) == NOTIFY_STOP)
+		return;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	sig_data.si_signo = SIGILL;
+	sig_data.si_errno = 0;
+	sig_data.si_trapno = 0;
+	sig_data.si_addr = (void __user *)regs->tpc;
+	sig_data.si_code = ILL_PRVOPC;
+	force_sig_info(SIGILL, &sig_data, current);
+}
+
+/* Privileged action trap -- handled identically to a privileged
+ * opcode trap.
+ */
+void do_privact(struct pt_regs *regs)
+{
+	do_privop(regs);
+}
+
+/* Trap level 1 stuff or other traps we should never see...
+ *
+ * Each *_tl1 handler below dumps the TL1 trap state that the trap
+ * entry code saved immediately after the pt_regs area ("regs + 1"),
+ * then dies via die_if_kernel().  The TL0 variants (do_cee, do_paw,
+ * do_vaw) go straight to die_if_kernel().
+ */
+void do_cee(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Cache Error Exception", regs);
+}
+
+void do_cee_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Cache Error Exception", regs);
+}
+
+void do_dae_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Data Access Exception", regs);
+}
+
+void do_iae_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Instruction Access Exception", regs);
+}
+
+void do_div0_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: DIV0 Exception", regs);
+}
+
+void do_fpdis_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU Disabled", regs);
+}
+
+void do_fpieee_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU IEEE Exception", regs);
+}
+
+void do_fpother_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU Other Exception", regs);
+}
+
+void do_ill_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Illegal Instruction Exception", regs);
+}
+
+void do_irq_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: IRQ Exception", regs);
+}
+
+void do_lddfmna_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: LDDF Exception", regs);
+}
+
+void do_stdfmna_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: STDF Exception", regs);
+}
+
+void do_paw(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
+}
+
+void do_paw_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
+}
+
+void do_vaw(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
+}
+
+void do_vaw_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
+}
+
+void do_tof_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Tag Overflow Exception", regs);
+}
+
+/* Return a PSR image derived from TSTATE in %i0 (UREG_I0) and step
+ * past the trapping instruction, masking the PCs to 32 bits for
+ * compat tasks.  NOTE(review): tstate_to_psr() is defined elsewhere;
+ * presumably this services a v8-style "read %psr" software trap --
+ * confirm against the trap table.
+ */
+void do_getpsr(struct pt_regs *regs)
+{
+	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
+	regs->tpc   = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+}
+
+extern void thread_info_offsets_are_bolixed_dave(void);
+
+/* Only invoked on boot processor. */
+void __init trap_init(void)
+{
+	/* Compile time sanity check.  The TI_* values are offsets used
+	 * by assembly code to index into struct thread_info; if any of
+	 * them disagrees with the real structure layout (or TI_FPREGS
+	 * loses its 64-byte alignment), call a function that is never
+	 * defined -- presumably so the build fails at link time with
+	 * an unmistakable symbol name.
+	 */
+	if (TI_TASK != offsetof(struct thread_info, task) ||
+	    TI_FLAGS != offsetof(struct thread_info, flags) ||
+	    TI_CPU != offsetof(struct thread_info, cpu) ||
+	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+	    TI_KSP != offsetof(struct thread_info, ksp) ||
+	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
+	    TI_KREGS != offsetof(struct thread_info, kregs) ||
+	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
+	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
+	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
+	    TI_GSR != offsetof(struct thread_info, gsr) ||
+	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
+	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
+	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
+	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
+	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
+	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
+	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
+	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+	    (TI_FPREGS & (64 - 1)))
+		thread_info_offsets_are_bolixed_dave();
+
+	/* Attach to the address space of init_task.  On SMP we
+	 * do this in smp.c:smp_callin for other cpus.
+	 */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
new file mode 100644
index 0000000..491bb36
--- /dev/null
+++ b/arch/sparc64/kernel/ttable.S
@@ -0,0 +1,280 @@
+/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
+ * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
+ *
+ * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+	.globl	sparc64_ttable_tl0, sparc64_ttable_tl1
+	.globl	tl0_icpe, tl1_icpe
+	.globl	tl0_dcpe, tl1_dcpe
+	.globl	tl0_fecc, tl1_fecc
+	.globl	tl0_cee, tl1_cee
+	.globl	tl0_iae, tl1_iae
+	.globl	tl0_dae, tl1_dae
+
/* Trap table for traps taken at trap-level 0 (normal kernel/user
 * execution).  One labelled entry per hardware trap vector, filled
 * in by the TRAP*/BTRAP/SPILL/FILL macros; BTRAP()/BTRAPS() mark
 * reserved or unimplemented vectors.  Layout is position-sensitive:
 * do not insert or remove entries.
 */
sparc64_ttable_tl0:
tl0_resv000:	BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004:	BTRAP(0x4)  BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax:	membar #Sync
		TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
tl0_resv009:	BTRAP(0x9)
tl0_iae:	TRAP(do_iae)
tl0_resv00b:	BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill:	membar #Sync
		TRAP_7INSNS(do_illegal_instruction)
tl0_privop:	TRAP(do_privop)
tl0_resv012:	BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
tl0_resv018:	BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
tl0_resv01e:	BTRAP(0x1e) BTRAP(0x1f)
tl0_fpdis:	TRAP_NOSAVE(do_fpdis)
tl0_fpieee:	TRAP_SAVEFPU(do_fpieee)
tl0_fpother:	TRAP_NOSAVE(do_fpother_check_fitos)
tl0_tof:	TRAP(do_tof)
tl0_cwin:	CLEAN_WINDOW
tl0_div0:	TRAP(do_div0)
tl0_resv029:	BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f:	BTRAP(0x2f)
tl0_dax:	TRAP_NOSAVE(__do_data_access_exception)
tl0_resv031:	BTRAP(0x31)
tl0_dae:	TRAP(do_dae)
tl0_resv033:	BTRAP(0x33)
tl0_mna:	TRAP_NOSAVE(do_mna)
tl0_lddfmna:	TRAP_NOSAVE(do_lddfmna)
tl0_stdfmna:	TRAP_NOSAVE(do_stdfmna)
tl0_privact:	TRAP_NOSAVE(__do_privact)
tl0_resv038:	BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
/* Interrupt vectors 0x41-0x4f.  The first three are cross-call IPIs
 * on SMP, plain bad traps on UP.
 */
#ifdef CONFIG_SMP
tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
#else
tl0_irq1:	BTRAP(0x41)
tl0_irq2:	BTRAP(0x42)
tl0_irq3:	BTRAP(0x43)
#endif
tl0_irq4:	TRAP_IRQ(handler_irq, 4)
tl0_irq5:	TRAP_IRQ(handler_irq, 5)  TRAP_IRQ(handler_irq, 6)
tl0_irq7:	TRAP_IRQ(handler_irq, 7)  TRAP_IRQ(handler_irq, 8)
tl0_irq9:	TRAP_IRQ(handler_irq, 9)  TRAP_IRQ(handler_irq, 10)
tl0_irq11:	TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
tl0_irq13:	TRAP_IRQ(handler_irq, 13)
/* PIL 14 is the timer tick IPI on SMP. */
#ifndef CONFIG_SMP
tl0_irq14:	TRAP_IRQ(handler_irq, 14)
#else
tl0_irq14:	TICK_SMP_IRQ
#endif
tl0_irq15:	TRAP_IRQ(handler_irq, 15)
tl0_resv050:	BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
tl0_resv056:	BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
tl0_resv05c:	BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec:	TRAP_IVEC
tl0_paw:	TRAP(do_paw)
tl0_vaw:	TRAP(do_vaw)
tl0_cee:	TRAP_NOSAVE(cee_trap)
/* The TLB miss/protection handlers are included inline so they land
 * at their architecturally-fixed vector offsets.
 */
tl0_iamiss:
#include	"itlb_base.S"
tl0_damiss:
#include	"dtlb_base.S"
tl0_daprot:
#include	"dtlb_prot.S"
tl0_fecc:	BTRAP(0x70)	/* Fast-ECC on Cheetah */
tl0_dcpe:	BTRAP(0x71)	/* D-cache Parity Error on Cheetah+ */
tl0_icpe:	BTRAP(0x72)	/* I-cache Parity Error on Cheetah+ */
tl0_resv073:	BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
tl0_resv076:	BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
tl0_resv07c:	BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
/* Register window spill/fill handlers, 0x80-0xff. */
tl0_s0n:	SPILL_0_NORMAL
tl0_s1n:	SPILL_1_NORMAL
tl0_s2n:	SPILL_2_NORMAL
tl0_s3n:	SPILL_3_NORMAL
tl0_s4n:	SPILL_4_NORMAL
tl0_s5n:	SPILL_5_NORMAL
tl0_s6n:	SPILL_6_NORMAL
tl0_s7n:	SPILL_7_NORMAL
tl0_s0o:	SPILL_0_OTHER
tl0_s1o:	SPILL_1_OTHER
tl0_s2o:	SPILL_2_OTHER
tl0_s3o:	SPILL_3_OTHER
tl0_s4o:	SPILL_4_OTHER
tl0_s5o:	SPILL_5_OTHER
tl0_s6o:	SPILL_6_OTHER
tl0_s7o:	SPILL_7_OTHER
tl0_f0n:	FILL_0_NORMAL
tl0_f1n:	FILL_1_NORMAL
tl0_f2n:	FILL_2_NORMAL
tl0_f3n:	FILL_3_NORMAL
tl0_f4n:	FILL_4_NORMAL
tl0_f5n:	FILL_5_NORMAL
tl0_f6n:	FILL_6_NORMAL
tl0_f7n:	FILL_7_NORMAL
tl0_f0o:	FILL_0_OTHER
tl0_f1o:	FILL_1_OTHER
tl0_f2o:	FILL_2_OTHER
tl0_f3o:	FILL_3_OTHER
tl0_f4o:	FILL_4_OTHER
tl0_f5o:	FILL_5_OTHER
tl0_f6o:	FILL_6_OTHER
tl0_f7o:	FILL_7_OTHER
/* Software traps (ta instructions), 0x100 and up: system call
 * entry points for the various ABIs/emulations.
 */
tl0_sunos:	SUNOS_SYSCALL_TRAP
tl0_bkpt:	BREAKPOINT_TRAP
tl0_divz:	TRAP(do_div0)
tl0_flushw:	FLUSH_WINDOW_TRAP
tl0_resv104:	BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
		.globl tl0_solaris
tl0_solaris:	SOLARIS_SYSCALL_TRAP
tl0_netbsd:	NETBSD_SYSCALL_TRAP
tl0_resv10a:	BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
tl0_resv10f:	BTRAP(0x10f)
tl0_linux32:	LINUX_32BIT_SYSCALL_TRAP
tl0_oldlinux64:	LINUX_64BIT_SYSCALL_TRAP
tl0_resv112:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113)
tl0_resv114:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115)
tl0_resv116:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117)
tl0_resv118:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119)
tl0_resv11a:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b)
tl0_resv11c:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d)
tl0_resv11e:	TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
tl0_getcc:	GETCC_TRAP
tl0_setcc:	SETCC_TRAP
tl0_getpsr:	TRAP(do_getpsr)
tl0_resv123:	BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
tl0_solindir:	INDIRECT_SOLARIS_SYSCALL(156)
tl0_resv128:	BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
tl0_resv12d:	BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
tl0_resv132:	BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
tl0_resv137:	BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
tl0_resv13c:	BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
tl0_resv141:	BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
tl0_resv146:	BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
tl0_resv14b:	BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
tl0_resv150:	BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
tl0_resv155:	BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
tl0_resv15a:	BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
tl0_resv15f:	BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
tl0_resv164:	BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
tl0_resv169:	BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
tl0_linux64:	LINUX_64BIT_SYSCALL_TRAP
tl0_gsctx:	TRAP(sparc64_get_context) TRAP(sparc64_set_context)
tl0_resv170:	KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) BTRAP(0x172)
tl0_resv173:	BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
tl0_resv178:	BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
tl0_resv17d:	BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
tl0_resv180:	BTRAPS(0x180) BTRAPS(0x188)
tl0_resv190:	BTRAPS(0x190) BTRAPS(0x198)
tl0_resv1a0:	BTRAPS(0x1a0) BTRAPS(0x1a8)
tl0_resv1b0:	BTRAPS(0x1b0) BTRAPS(0x1b8)
tl0_resv1c0:	BTRAPS(0x1c0) BTRAPS(0x1c8)
tl0_resv1d0:	BTRAPS(0x1d0) BTRAPS(0x1d8)
tl0_resv1e0:	BTRAPS(0x1e0) BTRAPS(0x1e8)
tl0_resv1f0:	BTRAPS(0x1f0) BTRAPS(0x1f8)
+
/* Trap table used when a trap is taken while already at trap-level
 * >= 1 (i.e. from within trap-handler code).  Same vector layout as
 * the TL0 table; most entries go to the *_tl1 handlers or are fatal
 * BTRAPTL1() slots.  Layout is position-sensitive.
 */
sparc64_ttable_tl1:
tl1_resv000:	BOOT_KERNEL    BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004:	BTRAPTL1(0x4)  BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
tl1_iax:	TRAP_NOSAVE(__do_instruction_access_exception_tl1)
tl1_resv009:	BTRAPTL1(0x9)
tl1_iae:	TRAPTL1(do_iae_tl1)
tl1_resv00b:	BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill:	TRAPTL1(do_ill_tl1)
tl1_privop:	BTRAPTL1(0x11)
tl1_resv012:	BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
tl1_resv016:	BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
tl1_resv01a:	BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
tl1_resv01e:	BTRAPTL1(0x1e) BTRAPTL1(0x1f)
tl1_fpdis:	TRAP_NOSAVE(do_fpdis)
tl1_fpieee:	TRAPTL1(do_fpieee_tl1)
tl1_fpother:	TRAPTL1(do_fpother_tl1)
tl1_tof:	TRAPTL1(do_tof_tl1)
tl1_cwin:	CLEAN_WINDOW
tl1_div0:	TRAPTL1(do_div0_tl1)
tl1_resv029:	BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d:	BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
tl1_dax:	TRAP_NOSAVE(__do_data_access_exception_tl1)
tl1_resv031:	BTRAPTL1(0x31)
tl1_dae:	TRAPTL1(do_dae_tl1)
tl1_resv033:	BTRAPTL1(0x33)
tl1_mna:	TRAP_NOSAVE(do_mna)
tl1_lddfmna:	TRAPTL1(do_lddfmna_tl1)
tl1_stdfmna:	TRAPTL1(do_stdfmna_tl1)
tl1_privact:	BTRAPTL1(0x37)
tl1_resv038:	BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
tl1_resv03c:	BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
tl1_resv040:	BTRAPTL1(0x40)
tl1_irq1:	TRAP_IRQ(do_irq_tl1, 1)  TRAP_IRQ(do_irq_tl1, 2)  TRAP_IRQ(do_irq_tl1, 3)
tl1_irq4:	TRAP_IRQ(do_irq_tl1, 4)  TRAP_IRQ(do_irq_tl1, 5)  TRAP_IRQ(do_irq_tl1, 6)
tl1_irq7:	TRAP_IRQ(do_irq_tl1, 7)  TRAP_IRQ(do_irq_tl1, 8)  TRAP_IRQ(do_irq_tl1, 9)
tl1_irq10:	TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
tl1_irq12:	TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
tl1_irq14:	TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
tl1_resv050:	BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
tl1_resv054:	BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
tl1_resv058:	BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
tl1_resv05c:	BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
tl1_ivec:	TRAP_IVEC
tl1_paw:	TRAPTL1(do_paw_tl1)
tl1_vaw:	TRAPTL1(do_vaw_tl1)

		/* The grotty trick to save %g1 into current->thread.cee_stuff
		 * is because when we take this trap we could be interrupting trap
		 * code already using the trap alternate global registers.
		 *
		 * We cross our fingers and pray that this store/load does
		 * not cause yet another CEE trap.
		 */
tl1_cee:	membar	#Sync
		stx	%g1, [%g6 + TI_CEE_STUFF]
		ldxa	[%g0] ASI_AFSR, %g1
		membar	#Sync
		stxa	%g1, [%g0] ASI_AFSR
		membar	#Sync
		ldx	[%g6 + TI_CEE_STUFF], %g1
		retry

tl1_iamiss:	BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
tl1_damiss:
#include	"dtlb_backend.S"
tl1_daprot:
#include	"dtlb_prot.S"
tl1_fecc:	BTRAPTL1(0x70)	/* Fast-ECC on Cheetah */
tl1_dcpe:	BTRAPTL1(0x71)	/* D-cache Parity Error on Cheetah+ */
tl1_icpe:	BTRAPTL1(0x72)	/* I-cache Parity Error on Cheetah+ */
tl1_resv073:	BTRAPTL1(0x73)
tl1_resv074:	BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
tl1_resv078:	BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
tl1_resv07c:	BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
/* Register window spill/fill handlers, 0x80-0xff. */
tl1_s0n:	SPILL_0_NORMAL
tl1_s1n:	SPILL_1_NORMAL
tl1_s2n:	SPILL_2_NORMAL
tl1_s3n:	SPILL_3_NORMAL
tl1_s4n:	SPILL_4_NORMAL
tl1_s5n:	SPILL_5_NORMAL
tl1_s6n:	SPILL_6_NORMAL
tl1_s7n:	SPILL_7_NORMAL
tl1_s0o:	SPILL_0_OTHER
tl1_s1o:	SPILL_1_OTHER
tl1_s2o:	SPILL_2_OTHER
tl1_s3o:	SPILL_3_OTHER
tl1_s4o:	SPILL_4_OTHER
tl1_s5o:	SPILL_5_OTHER
tl1_s6o:	SPILL_6_OTHER
tl1_s7o:	SPILL_7_OTHER
tl1_f0n:	FILL_0_NORMAL
tl1_f1n:	FILL_1_NORMAL
tl1_f2n:	FILL_2_NORMAL
tl1_f3n:	FILL_3_NORMAL
tl1_f4n:	FILL_4_NORMAL
tl1_f5n:	FILL_5_NORMAL
tl1_f6n:	FILL_6_NORMAL
tl1_f7n:	FILL_7_NORMAL
tl1_f0o:	FILL_0_OTHER
tl1_f1o:	FILL_1_OTHER
tl1_f2o:	FILL_2_OTHER
tl1_f3o:	FILL_3_OTHER
tl1_f4o:	FILL_4_OTHER
tl1_f5o:	FILL_5_OTHER
tl1_f6o:	FILL_6_OTHER
tl1_f7o:	FILL_7_OTHER
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
new file mode 100644
index 0000000..4372bf3
--- /dev/null
+++ b/arch/sparc64/kernel/unaligned.c
@@ -0,0 +1,729 @@
+/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
+ * unaligned.c: Unaligned load/store trap handling with special
+ *              cases for the kernel to do them more quickly.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+#include <asm/pstate.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/bitops.h>
+#include <asm/fpumacro.h>
+
+/* #define DEBUG_MNA */
+
/* Decoded direction of the faulting memory operation. */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth */
	both,    /* Swap, ldstub, cas, ... */
	fpld,    /* floating-point load */
	fpst,    /* floating-point store */
	invalid,
};
+
#ifdef DEBUG_MNA
/* Printable names for enum direction, in declaration order. */
static char *dirstrings[] = {
  "load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
+
+static inline enum direction decode_direction(unsigned int insn)
+{
+	unsigned long tmp = (insn >> 21) & 1;
+
+	if (!tmp)
+		return load;
+	else {
+		switch ((insn>>19)&0xf) {
+		case 15: /* swap* */
+			return both;
+		default:
+			return store;
+		}
+	}
+}
+
+/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
+static inline int decode_access_size(unsigned int insn)
+{
+	unsigned int tmp;
+
+	tmp = ((insn >> 19) & 0xf);
+	if (tmp == 11 || tmp == 14) /* ldx/stx */
+		return 8;
+	tmp &= 3;
+	if (!tmp)
+		return 4;
+	else if (tmp == 3)
+		return 16;	/* ldd/std - Although it is actually 8 */
+	else if (tmp == 2)
+		return 2;
+	else {
+		printk("Impossible unaligned trap. insn=%08x\n", insn);
+		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+
+		/* GCC should never warn that control reaches the end
+		 * of this function without returning a value because
+		 * die_if_kernel() is marked with attribute 'noreturn'.
+		 * Alas, some versions do...
+		 */
+
+		return 0;
+	}
+}
+
+static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
+{
+	if (insn & 0x800000) {
+		if (insn & 0x2000)
+			return (unsigned char)(regs->tstate >> 24);	/* %asi */
+		else
+			return (unsigned char)(insn >> 5);		/* imm_asi */
+	} else
+		return ASI_P;
+}
+
/* Nonzero (0x400000) for sign-extending loads, zero for unsigned. */
static inline int decode_signedness(unsigned int insn)
{
	/* Bit 22 selects the signed variant of the load opcodes. */
	return (int)(insn & 0x400000);
}
+
/* Registers %l0 and above (number >= 16) live in the register
 * window saved on the stack, so the windows must be flushed to
 * memory before they can be read or written there.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs1 < 16 && rs2 < 16 && rd < 16)
		return;

	if (from_kernel)
		__asm__ __volatile__("flushw");
	else
		flushw_user();
}
+
/* Sign-extend the low 13 bits of @imm (the simm13 field of an
 * instruction word) to a full long.
 *
 * The previous "imm << 51 >> 51" form left-shifts set bits into the
 * sign bit (undefined behavior in C) and then relies on the
 * implementation-defined arithmetic right shift of a negative value.
 * The mask/xor/subtract form below is fully defined and produces the
 * identical result: bit 12 is propagated as the sign.
 */
static inline long sign_extend_imm13(long imm)
{
	imm &= 0x1fff;			/* isolate the 13-bit field */
	return (imm ^ 0x1000) - 0x1000;	/* propagate bit 12 upward */
}
+
/* Read the current value of register number @reg for the trapping
 * context.  Globals and outs (reg < 16) are in pt_regs itself;
 * locals and ins (reg >= 16) are read from the register window
 * saved on the stack.
 *
 * NOTE(review): if the user-space get_user() faults, 'value' is
 * returned without having been assigned -- presumably acceptable to
 * the callers' fault handling; confirm.
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;
	
	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);	/* %g0 always reads as 0 */
	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel window: read directly from the (biased) stack. */
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit user task: 32-bit window layout, unbiased FP. */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		/* 64-bit user task: 64-bit window layout, biased FP. */
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
+
+static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+	if (reg < 16)
+		return &regs->u_regs[reg];
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *win;
+		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		return &win->locals[reg - 16];
+	} else if (test_thread_flag(TIF_32BIT)) {
+		struct reg_window32 *win32;
+		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+		return (unsigned long *)&win32->locals[reg - 16];
+	} else {
+		struct reg_window *win;
+		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		return &win->locals[reg - 16];
+	}
+}
+
+unsigned long compute_effective_address(struct pt_regs *regs,
+					unsigned int insn, unsigned int rd)
+{
+	unsigned int rs1 = (insn >> 14) & 0x1f;
+	unsigned int rs2 = insn & 0x1f;
+	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+
+	if (insn & 0x2000) {
+		maybe_flush_windows(rs1, 0, rd, from_kernel);
+		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+	} else {
+		maybe_flush_windows(rs1, rs2, rd, from_kernel);
+		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+	}
+}
+
/* This is just to make gcc think die_if_kernel does return... */
static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
+
/* Emulate an unaligned integer load of @size bytes (2, 4, 8, or 16
 * for ldd) from @saddr via ASI @asi, assembling the value byte by
 * byte and storing it through @dest_reg (a pointer into the saved
 * register state).  Sign-extends when @is_signed is set.  Every
 * byte access is covered by an __ex_table entry branching to @errh
 * on fault; %asi is restored to ASI_AIUS on exit.
 */
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({		\
__asm__ __volatile__ (								\
	"wr	%4, 0, %%asi\n\t"						\
	"cmp	%1, 8\n\t"							\
	"bge,pn	%%icc, 9f\n\t"							\
	" cmp	%1, 4\n\t"							\
	"be,pt	%%icc, 6f\n"							\
"4:\t"	" lduba	[%2] %%asi, %%l1\n"						\
"5:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"					\
	"sll	%%l1, 8, %%l1\n\t"						\
	"brz,pt	%3, 3f\n\t"							\
	" add	%%l1, %%l2, %%l1\n\t"						\
	"sllx	%%l1, 48, %%l1\n\t"						\
	"srax	%%l1, 48, %%l1\n"						\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"							\
	" stx	%%l1, [%0]\n"							\
"6:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"					\
	"sll	%%l1, 24, %%l1\n"						\
"7:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"					\
	"sll	%%l2, 16, %%l2\n"						\
"8:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"					\
	"sll	%%g7, 8, %%g7\n\t"						\
	"or	%%l1, %%l2, %%l1\n\t"						\
	"or	%%g7, %%g1, %%g7\n\t"						\
	"or	%%l1, %%g7, %%l1\n\t"						\
	"brnz,a,pt %3, 3f\n\t"							\
	" sra	%%l1, 0, %%l1\n"						\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"							\
	" stx	%%l1, [%0]\n"							\
"9:\t"	"lduba	[%2] %%asi, %%l1\n"						\
"10:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"					\
	"sllx	%%l1, 56, %%l1\n"						\
"11:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"					\
	"sllx	%%l2, 48, %%l2\n"						\
"12:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"					\
	"sllx	%%g7, 40, %%g7\n\t"						\
	"sllx	%%g1, 32, %%g1\n\t"						\
	"or	%%l1, %%l2, %%l1\n\t"						\
	"or	%%g7, %%g1, %%g7\n"						\
"13:\t"	"lduba	[%2 + 4] %%asi, %%l2\n\t"					\
	"or	%%l1, %%g7, %%g7\n"						\
"14:\t"	"lduba	[%2 + 5] %%asi, %%g1\n\t"					\
	"sllx	%%l2, 24, %%l2\n"						\
"15:\t"	"lduba	[%2 + 6] %%asi, %%l1\n\t"					\
	"sllx	%%g1, 16, %%g1\n\t"						\
	"or	%%g7, %%l2, %%g7\n"						\
"16:\t"	"lduba	[%2 + 7] %%asi, %%l2\n\t"					\
	"sllx	%%l1, 8, %%l1\n\t"						\
	"or	%%g7, %%g1, %%g7\n\t"						\
	"or	%%l1, %%l2, %%l1\n\t"						\
	"or	%%g7, %%l1, %%g7\n\t"						\
	"cmp	%1, 8\n\t"							\
	"be,a,pt %%icc, 0f\n\t"							\
	" stx	%%g7, [%0]\n\t"							\
	"srlx	%%g7, 32, %%l1\n\t"						\
	"sra	%%g7, 0, %%g7\n\t"						\
	"stx	%%l1, [%0]\n\t"							\
	"stx	%%g7, [%0 + 8]\n"						\
"0:\n\t"									\
	"wr	%%g0, %5, %%asi\n\n\t"						\
	".section __ex_table\n\t"						\
	".word	4b, " #errh "\n\t"						\
	".word	5b, " #errh "\n\t"						\
	".word	6b, " #errh "\n\t"						\
	".word	7b, " #errh "\n\t"						\
	".word	8b, " #errh "\n\t"						\
	".word	9b, " #errh "\n\t"						\
	".word	10b, " #errh "\n\t"						\
	".word	11b, " #errh "\n\t"						\
	".word	12b, " #errh "\n\t"						\
	".word	13b, " #errh "\n\t"						\
	".word	14b, " #errh "\n\t"						\
	".word	15b, " #errh "\n\t"						\
	".word	16b, " #errh "\n\n\t"						\
	".previous\n\t"								\
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed),		\
	  "r" (asi), "i" (ASI_AIUS)						\
	: "l1", "l2", "g7", "g1", "cc");					\
})
+	
/* Emulate an unaligned integer store: write @size bytes (2, 4, or 8)
 * of the value at *@src_val to @dst_addr via ASI @asi, one byte at a
 * time.  Each byte store has an __ex_table entry branching to @errh
 * on fault; %asi is restored to ASI_AIUS on exit.
 */
#define store_common(dst_addr, size, src_val, asi, errh) ({			\
__asm__ __volatile__ (								\
	"wr	%3, 0, %%asi\n\t"						\
	"ldx	[%2], %%l1\n"							\
	"cmp	%1, 2\n\t"							\
	"be,pn	%%icc, 2f\n\t"							\
	" cmp	%1, 4\n\t"							\
	"be,pt	%%icc, 1f\n\t"							\
	" srlx	%%l1, 24, %%l2\n\t"						\
	"srlx	%%l1, 56, %%g1\n\t"						\
	"srlx	%%l1, 48, %%g7\n"						\
"4:\t"	"stba	%%g1, [%0] %%asi\n\t"						\
	"srlx	%%l1, 40, %%g1\n"						\
"5:\t"	"stba	%%g7, [%0 + 1] %%asi\n\t"					\
	"srlx	%%l1, 32, %%g7\n"						\
"6:\t"	"stba	%%g1, [%0 + 2] %%asi\n"						\
"7:\t"	"stba	%%g7, [%0 + 3] %%asi\n\t"					\
	"srlx	%%l1, 16, %%g1\n"						\
"8:\t"	"stba	%%l2, [%0 + 4] %%asi\n\t"					\
	"srlx	%%l1, 8, %%g7\n"						\
"9:\t"	"stba	%%g1, [%0 + 5] %%asi\n"						\
"10:\t"	"stba	%%g7, [%0 + 6] %%asi\n\t"					\
	"ba,pt	%%xcc, 0f\n"							\
"11:\t"	" stba	%%l1, [%0 + 7] %%asi\n"						\
"1:\t"	"srl	%%l1, 16, %%g7\n"						\
"12:\t"	"stba	%%l2, [%0] %%asi\n\t"						\
	"srl	%%l1, 8, %%l2\n"						\
"13:\t"	"stba	%%g7, [%0 + 1] %%asi\n"						\
"14:\t"	"stba	%%l2, [%0 + 2] %%asi\n\t"					\
	"ba,pt	%%xcc, 0f\n"							\
"15:\t"	" stba	%%l1, [%0 + 3] %%asi\n"						\
"2:\t"	"srl	%%l1, 8, %%l2\n"						\
"16:\t"	"stba	%%l2, [%0] %%asi\n"						\
"17:\t"	"stba	%%l1, [%0 + 1] %%asi\n"						\
"0:\n\t"									\
	"wr	%%g0, %4, %%asi\n\n\t"						\
	".section __ex_table\n\t"						\
	".word	4b, " #errh "\n\t"						\
	".word	5b, " #errh "\n\t"						\
	".word	6b, " #errh "\n\t"						\
	".word	7b, " #errh "\n\t"						\
	".word	8b, " #errh "\n\t"						\
	".word	9b, " #errh "\n\t"						\
	".word	10b, " #errh "\n\t"						\
	".word	11b, " #errh "\n\t"						\
	".word	12b, " #errh "\n\t"						\
	".word	13b, " #errh "\n\t"						\
	".word	14b, " #errh "\n\t"						\
	".word	15b, " #errh "\n\t"						\
	".word	16b, " #errh "\n\t"						\
	".word	17b, " #errh "\n\n\t"						\
	".previous\n\t"								\
	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
	: "l1", "l2", "g7", "g1", "cc");					\
})
+
/* Emulate an unaligned integer store from register @reg_num.  A
 * size-16 request is an std: the two 32-bit halves of the register
 * pair are packed into one 64-bit value stored as 8 bytes.  %g0
 * (reg_num == 0) stores zero via the local 'zero' temporary.
 */
#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({		\
	unsigned long zero = 0;							\
	unsigned long *src_val = &zero;						\
										\
	if (size == 16) {							\
		size = 8;							\
		zero = (((long)(reg_num ? 					\
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |	\
			(unsigned)fetch_reg(reg_num + 1, regs);			\
	} else if (reg_num) src_val = fetch_reg_addr(reg_num, regs);		\
	store_common(dst_addr, size, src_val, asi, errh);			\
})
+
+extern void smp_capture(void);
+extern void smp_release(void);
+
/* Emulate an unaligned atomic (swap-style) access by performing the
 * load and store back-to-back with local interrupts disabled and all
 * other cpus captured.
 *
 * NOTE(review): this macro appears stale -- do_integer_load() takes
 * six arguments and store_common() five, but they are invoked below
 * with one argument fewer (no asi).  The only call site in this file
 * is compiled out with "#if 0", so this never builds; fix the
 * argument lists before re-enabling it.
 */
#define do_atomic(srcdest_reg, mem, errh) ({					\
	unsigned long flags, tmp;						\
										\
	smp_capture();								\
	local_irq_save(flags);							\
	tmp = *srcdest_reg;							\
	do_integer_load(srcdest_reg, 4, mem, 0, errh);				\
	store_common(mem, 4, &tmp, errh);					\
	local_irq_restore(flags);						\
	smp_release();								\
})
+
+static inline void advance(struct pt_regs *regs)
+{
+	regs->tpc   = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+}
+
/* True when bit 24 marks this as a floating-point load/store. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & (1U << 24)) != 0;
}
+
/* The in-kernel unaligned fixup only handles integer accesses. */
static inline int ok_for_kernel(unsigned int insn)
{
	if (floating_point_load_or_store_p(insn))
		return 0;
	return 1;
}
+
/* Forced assembler name so the hand-written inline-asm call site in
 * kernel_unaligned_trap() can reference it directly.
 */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

/* Called when the in-kernel unaligned emulation itself faults:
 * either branch to the exception-table fixup registered for this pc,
 * or print a page-fault-style report and die.
 */
void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs [UREG_G2];
	unsigned long fixup = search_extables_range(regs->tpc, &g2);

	if (!fixup) {
		unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
        	if (address < PAGE_SIZE) {
                	printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
        	} else
                	printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
	        printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
	        die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* Resume at the fixup stub with %g2 as the extable helper left it. */
	regs->tpc = fixup;
	regs->tnpc = regs->tpc + 4;
	regs->u_regs [UREG_G2] = g2;

	/* Restore the default %asi (user secondary) in tstate. */
	regs->tstate &= ~TSTATE_ASI;
	regs->tstate |= (ASI_AIUS << 24UL);
}
+
/* Entry point for unaligned-access traps taken in kernel mode:
 * decode the instruction and emulate the access byte-wise, or panic
 * for the cases the kernel does not support (fpu and atomic ops).
 */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
		       regs->tpc);
		unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);

		/* Never executed: this asm only exists to emit the
		 * kernel_unaligned_trap_fault label that the
		 * __ex_table entries in the emulation macros below
		 * reference as their fault handler.
		 */
		__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
		"mov	%0, %%o0\n\t"
		"call	kernel_mna_trap_fault\n\t"
		" mov	%1, %%o1\n\t"
		:
		: "r" (regs), "r" (insn)
		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
		  "g1", "g2", "g3", "g4", "g7", "cc");
	} else {
		unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));

#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
		switch (dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn), decode_asi(insn, regs),
					kernel_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn>>25)&0x1f), size,
					 (unsigned long *) addr, regs,
					 decode_asi(insn, regs),
					 kernel_unaligned_trap_fault);
			break;
#if 0 /* unsupported */
		case both:
			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
				  (unsigned long *) addr,
				  kernel_unaligned_trap_fault);
			break;
#endif
		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		/* Emulation succeeded; skip the faulting instruction. */
		advance(regs);
	}
}
+
/* Number of set bits in each possible 4-bit nibble value. */
static char popc_helper[] = {
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
};
+
/* Emulate the v9 'popc' (population count) instruction, which some
 * cpus do not implement in hardware.  The source is either a simm13
 * immediate or a register; the result is written to destination
 * register rd.  Returns 1 (handled).
 */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	                        
	if (insn & 0x2000) {
		/* Immediate operand. */
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		/* Register operand (rs2). */
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	/* Count set bits one nibble at a time via the lookup table. */
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)		/* writes to %g0 are discarded */
			regs->u_regs[rd] = ret;
	} else {
		/* rd lives in the user register window on the stack. */
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
+
+extern void do_fpother(struct pt_regs *regs);
+extern void do_privact(struct pt_regs *regs);
+extern void data_access_exception(struct pt_regs *regs,
+				  unsigned long sfsr,
+				  unsigned long sfar);
+
/* Emulate floating-point quad loads/stores (and ldf/lddf variants)
 * that trapped: stq on store, ldf/lddf/ldqf on load.  Operates on
 * the software FPU save area rather than the live FPU.  Returns 1
 * when the trap was consumed (including via do_privact/
 * data_access_exception), 0 when do_fpother was raised instead.
 */
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	/* 6-bit fp register number: bit 5 comes from insn bit 25 region. */
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		/* Quad registers must be 4-aligned. */
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;
		
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			/* Restricted ASIs are privileged. */
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);
				
				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			data_access_exception(regs, 0, addr);
			return 1;
		}
		/* Write the 16 bytes as four 32-bit user stores. */
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
		    	data_access_exception(regs, 0, addr);
		    	return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			data_access_exception(regs, 0, addr);
			return 1;
		}
		/* size in 32-bit words: ldf=1, lddf=2, ldqf=4. */
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;
		
		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		/* Non-faulting ASIs (NF bit) read zeros on fault. */
		if (err && !(asi & 0x2 /* NF */)) {
			data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		/* Initialize the software FPU state on first use, and
		 * zero the untouched half before marking it valid.
		 */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}
+
/* A non-faulting load that faulted: the architecture defines the
 * result as zero, so write zero(s) into the destination register
 * (two words for the ldd variants) and skip the instruction.
 */
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;
	                        
	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		/* Destination is directly addressable kernel memory. */
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)	/* ldd: pair */
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		/* User window on the stack, 32-bit slots. */
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		/* User window on the stack, 64-bit slots. */
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
+
/* Emulate a user lddf (load double fp) that trapped on a 4-byte
 * aligned but 8-byte misaligned address: read the two word halves,
 * combine, byte-swap for little-endian ASIs, and store the value
 * into the software FPU save area.
 */
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() returns 0 on success or -EFAULT, so this tests
	 * that fetching the instruction word succeeded.
	 */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		asi = sfsr >> 16;	/* faulting ASI recorded in SFSR */
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 __user *)sfar) ||
		     get_user(second, (u32 __user *)(sfar + 4))) {
			if (asi & 0x2) /* NF: non-faulting loads read zero */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		/* First FPU use: initialize saved state; zero the other
		 * half of the register file before marking it valid.
		 */
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
+
/* Emulate a user stdf (store double fp) that trapped on a 4-byte
 * aligned but 8-byte misaligned address: fetch the value from the
 * software FPU save area, byte-swap for little-endian ASIs, and
 * store it as two word halves.
 */
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* get_user() returns 0 on success or -EFAULT: test for success. */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		asi = sfsr >> 16;	/* faulting ASI recorded in SFSR */
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		/* If this half of the fp file was never saved, store 0. */
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL: 
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:		data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
new file mode 100644
index 0000000..7aae0a1
--- /dev/null
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -0,0 +1,400 @@
+/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/asi.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us2e_driver;
+
+struct us2e_freq_percpu_info {
+	struct cpufreq_frequency_table table[6];
+};
+
+/* Indexed by cpu number. */
+static struct us2e_freq_percpu_info *us2e_freq_table;
+
+#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
+#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL
+
+/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
+ * in the ESTAR mode control register.
+ */
+#define ESTAR_MODE_DIV_1	0x0000000000000000UL
+#define ESTAR_MODE_DIV_2	0x0000000000000001UL
+#define ESTAR_MODE_DIV_4	0x0000000000000003UL
+#define ESTAR_MODE_DIV_6	0x0000000000000002UL
+#define ESTAR_MODE_DIV_8	0x0000000000000004UL
+#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL
+
+#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL
+#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL
+#define MCTRL0_REFR_COUNT_SHIFT	8
+#define MCTRL0_REFR_INTERVAL	7800
+#define MCTRL0_REFR_CLKS_P_CNT	64
+
+/* Read a Hummingbird (UltraSPARC-IIe) control register at physical
+ * address @addr using a cache-bypassing ldxa via ASI_PHYS_BYPASS_EC_E.
+ */
+static unsigned long read_hbreg(unsigned long addr)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=&r" (ret)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+	return ret;
+}
+
+/* Write @val to the Hummingbird control register at physical address
+ * @addr with a bypass-ASI stxa, ordered by membar #Sync.  Stores to
+ * the E-Star mode register additionally need a short delay for the
+ * clock PLL to re-lock.
+ */
+static void write_hbreg(unsigned long addr, unsigned long val)
+{
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
+			     : "memory");
+	if (addr == HBIRD_ESTAR_MODE_ADDR) {
+		/* Need to wait 16 clock cycles for the PLL to lock.  */
+		udelay(1);
+	}
+}
+
+/* Enable (@enable != 0) or disable memory self-refresh in memory
+ * control register zero.  The final read-back makes sure the store
+ * has taken effect before we return.
+ */
+static void self_refresh_ctl(int enable)
+{
+	unsigned long val;
+
+	val = read_hbreg(HBIRD_MEM_CNTL0_ADDR) & ~MCTRL0_SREFRESH_ENAB;
+	if (enable)
+		val |= MCTRL0_SREFRESH_ENAB;
+
+	write_hbreg(HBIRD_MEM_CNTL0_ADDR, val);
+	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+}
+
+/* Reprogram the memory refresh counter for a new clock divisor.
+ *
+ * The counter value is derived from the formula below: clock_tick (Hz)
+ * times the refresh interval, divided by 10^9 and by the number of
+ * (divided) clocks per counter tick — i.e. MCTRL0_REFR_INTERVAL is
+ * treated as nanoseconds and MCTRL0_REFR_CLKS_P_CNT as clocks per
+ * count.
+ *
+ * When slowing down with self-refresh disabled, we must busy-wait long
+ * enough for both the old and the new refresh counts to expire before
+ * returning (see comment in the body).
+ */
+static void frob_mem_refresh(int cpu_slowing_down,
+			     unsigned long clock_tick,
+			     unsigned long old_divisor, unsigned long divisor)
+{
+	unsigned long old_refr_count, refr_count, mctrl;
+
+
+	refr_count  = (clock_tick * MCTRL0_REFR_INTERVAL);
+	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
+
+	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
+		>> MCTRL0_REFR_COUNT_SHIFT;
+
+	/* Install the new refresh count and read it back. */
+	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
+	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
+	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
+		unsigned long usecs;
+
+		/* We have to wait for both refresh counts (old
+		 * and new) to go to zero.
+		 */
+		usecs = (MCTRL0_REFR_CLKS_P_CNT *
+			 (refr_count + old_refr_count) *
+			 1000000UL *
+			 old_divisor) / clock_tick;
+		udelay(usecs + 1UL);
+	}
+}
+
+/* Move the CPU clock divider from @old_divisor to @divisor, following
+ * the legal transition sequence with IRQs off:
+ *
+ *   - 2 -> 1 (speeding up to full): self-refresh off first, then the
+ *     divider change, then refresh-counter fixup;
+ *   - 1 -> 2 (dropping from full): refresh-counter fixup first, then
+ *     the divider change, then self-refresh on;
+ *   - transitions between divisor 1 and divisors > 2 are done in two
+ *     steps via divisor 2 (recursive calls);
+ *   - other speed-ups/slow-downs order the refresh fixup around the
+ *     divider write appropriately.
+ *
+ * Equal old and new divisors are a caller bug.
+ */
+static void us2e_transition(unsigned long estar, unsigned long new_bits,
+			    unsigned long clock_tick,
+			    unsigned long old_divisor, unsigned long divisor)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	estar &= ~ESTAR_MODE_DIV_MASK;
+
+	/* This is based upon the state transition diagram in the IIe manual.  */
+	if (old_divisor == 2 && divisor == 1) {
+		self_refresh_ctl(0);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+	} else if (old_divisor == 1 && divisor == 2) {
+		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		self_refresh_ctl(1);
+	} else if (old_divisor == 1 && divisor > 2) {
+		/* Step through divisor 2 on the way down. */
+		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+				1, 2);
+		us2e_transition(estar, new_bits, clock_tick,
+				2, divisor);
+	} else if (old_divisor > 2 && divisor == 1) {
+		/* Step through divisor 2 on the way up. */
+		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+				old_divisor, 2);
+		us2e_transition(estar, new_bits, clock_tick,
+				2, divisor);
+	} else if (old_divisor < divisor) {
+		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+	} else if (old_divisor > divisor) {
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+	} else {
+		BUG();
+	}
+
+	local_irq_restore(flags);
+}
+
+/* Map a frequency-table index (0-4) onto the E-Star mode register's
+ * divider field value.  Any other index is a driver bug.
+ */
+static unsigned long index_to_estar_mode(unsigned int index)
+{
+	static const unsigned long estar_mode_of[] = {
+		ESTAR_MODE_DIV_1,
+		ESTAR_MODE_DIV_2,
+		ESTAR_MODE_DIV_4,
+		ESTAR_MODE_DIV_6,
+		ESTAR_MODE_DIV_8,
+	};
+
+	if (index >= sizeof(estar_mode_of) / sizeof(estar_mode_of[0]))
+		BUG();
+
+	return estar_mode_of[index];
+}
+
+/* Map a frequency-table index (0-4) onto the corresponding clock
+ * divisor (1, 2, 4, 6 or 8).  Any other index is a driver bug.
+ */
+static unsigned long index_to_divisor(unsigned int index)
+{
+	static const unsigned long divisor_of[] = { 1, 2, 4, 6, 8 };
+
+	if (index >= sizeof(divisor_of) / sizeof(divisor_of[0]))
+		BUG();
+
+	return divisor_of[index];
+}
+
+/* Decode the clock divisor currently programmed into an E-Star mode
+ * register value.  Note the hardware encoding is not monotonic: field
+ * value 2 means divide-by-6 while 3 means divide-by-4.  Unused field
+ * encodings are a bug.
+ */
+static unsigned long estar_to_divisor(unsigned long estar)
+{
+	static const unsigned long divisor_of[ESTAR_MODE_DIV_MASK + 1] = {
+		[ESTAR_MODE_DIV_1] = 1,
+		[ESTAR_MODE_DIV_2] = 2,
+		[ESTAR_MODE_DIV_4] = 4,
+		[ESTAR_MODE_DIV_6] = 6,
+		[ESTAR_MODE_DIV_8] = 8,
+	};
+	unsigned long ret = divisor_of[estar & ESTAR_MODE_DIV_MASK];
+
+	if (!ret)
+		BUG();
+
+	return ret;
+}
+
+/* Program the clock divider selected by @index on @cpu, issuing the
+ * cpufreq PRE/POSTCHANGE notifications around the hardware transition.
+ * The calling task is temporarily bound to @cpu so the register
+ * accesses hit the right processor; the original CPU affinity is
+ * restored on exit.
+ */
+static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+{
+	unsigned long new_bits, new_freq;
+	unsigned long clock_tick, divisor, old_divisor, estar;
+	cpumask_t cpus_allowed;
+	struct cpufreq_freqs freqs;
+
+	if (!cpu_online(cpu))
+		return;
+
+	/* Bind ourselves to the target CPU. */
+	cpus_allowed = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	new_freq = clock_tick = sparc64_get_clock_tick(cpu);
+	new_bits = index_to_estar_mode(index);
+	divisor = index_to_divisor(index);
+	new_freq /= divisor;
+
+	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+	old_divisor = estar_to_divisor(estar);
+
+	freqs.old = clock_tick / old_divisor;
+	freqs.new = new_freq;
+	freqs.cpu = cpu;
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	/* Only touch the hardware if the divisor actually changes. */
+	if (old_divisor != divisor)
+		us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	set_cpus_allowed(current, cpus_allowed);
+}
+
+/* cpufreq "target" hook: look up the table entry matching @target_freq
+ * under @relation and program the corresponding divider.  Returns
+ * -EINVAL when no table entry satisfies the request.
+ */
+static int us2e_freq_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	struct cpufreq_frequency_table *table =
+		&us2e_freq_table[policy->cpu].table[0];
+	unsigned int idx = 0;
+
+	if (cpufreq_frequency_table_target(policy, table, target_freq,
+					   relation, &idx))
+		return -EINVAL;
+
+	us2e_set_cpu_divider_index(policy->cpu, idx);
+	return 0;
+}
+
+/* cpufreq "verify" hook: clamp the policy limits against this CPU's
+ * frequency table.
+ */
+static int us2e_freq_verify(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *table =
+		&us2e_freq_table[policy->cpu].table[0];
+
+	return cpufreq_frequency_table_verify(policy, table);
+}
+
+/* cpufreq per-CPU init: build this CPU's frequency table (dividers
+ * 1, 2, 4, 6 and 8) and register it with the cpufreq core.
+ *
+ * Bug fix: the original assigned table[2] five times in a row and
+ * terminated the table at entry 3, so the /6 and /8 operating points
+ * were lost and entries 3 and 4 were left uninitialized.  Fill
+ * entries 0-4 and put the CPUFREQ_TABLE_END sentinel in entry 5 (the
+ * per-CPU table has six slots).
+ */
+static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+	struct cpufreq_frequency_table *table =
+		&us2e_freq_table[cpu].table[0];
+
+	table[0].index = 0;
+	table[0].frequency = clock_tick / 1;
+	table[1].index = 1;
+	table[1].frequency = clock_tick / 2;
+	table[2].index = 2;
+	table[2].frequency = clock_tick / 4;
+	table[3].index = 3;
+	table[3].frequency = clock_tick / 6;
+	table[4].index = 4;
+	table[4].frequency = clock_tick / 8;
+	table[5].index = 5;
+	table[5].frequency = CPUFREQ_TABLE_END;
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	policy->cpuinfo.transition_latency = 0;
+	policy->cur = clock_tick;
+
+	return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+/* cpufreq per-CPU teardown: drop back to full speed (divider index 0)
+ * before the policy goes away, but only if the driver actually
+ * registered.
+ */
+static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+	if (cpufreq_us2e_driver != NULL)
+		us2e_set_cpu_divider_index(policy->cpu, 0);
+
+	return 0;
+}
+
+/* Module init: probe for an UltraSPARC-IIe (manufacturer 0x17,
+ * implementation 0x13 in the %ver register); if found, allocate and
+ * register the cpufreq driver and the per-CPU frequency tables.
+ *
+ * Fix: "driver->owner = THIS_MODULE," ended the statement with the
+ * comma operator rather than a semicolon; harmless by accident, but
+ * clearly a typo.  The error path is also flattened — kfree(NULL) is
+ * a no-op, so the guards were redundant.
+ */
+static int __init us2e_freq_init(void)
+{
+	unsigned long manuf, impl, ver;
+	int ret;
+
+	/* Identify the CPU from the privileged %ver register. */
+	__asm__("rdpr %%ver, %0" : "=r" (ver));
+	manuf = ((ver >> 48) & 0xffff);
+	impl  = ((ver >> 32) & 0xffff);
+
+	if (manuf == 0x17 && impl == 0x13) {
+		struct cpufreq_driver *driver;
+
+		ret = -ENOMEM;
+		driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		if (!driver)
+			goto err_out;
+		memset(driver, 0, sizeof(*driver));
+
+		us2e_freq_table = kmalloc(
+			(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+			GFP_KERNEL);
+		if (!us2e_freq_table)
+			goto err_out;
+
+		memset(us2e_freq_table, 0,
+		       (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
+
+		driver->verify = us2e_freq_verify;
+		driver->target = us2e_freq_target;
+		driver->init = us2e_freq_cpu_init;
+		driver->exit = us2e_freq_cpu_exit;
+		driver->owner = THIS_MODULE;
+		strcpy(driver->name, "UltraSPARC-IIe");
+
+		cpufreq_us2e_driver = driver;
+		ret = cpufreq_register_driver(driver);
+		if (ret)
+			goto err_out;
+
+		return 0;
+
+err_out:
+		/* Both pointers are NULL until assigned above, and
+		 * kfree(NULL) is a no-op, so no guards are needed.
+		 */
+		kfree(driver);
+		cpufreq_us2e_driver = NULL;
+		kfree(us2e_freq_table);
+		us2e_freq_table = NULL;
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+/* Module exit: unregister the driver and release everything allocated
+ * at init time.  A NULL driver pointer means the probe never matched.
+ */
+static void __exit us2e_freq_exit(void)
+{
+	if (!cpufreq_us2e_driver)
+		return;
+
+	cpufreq_unregister_driver(cpufreq_us2e_driver);
+	kfree(cpufreq_us2e_driver);
+	cpufreq_us2e_driver = NULL;
+	kfree(us2e_freq_table);
+	us2e_freq_table = NULL;
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
+MODULE_LICENSE("GPL");
+
+module_init(us2e_freq_init);
+module_exit(us2e_freq_exit);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
new file mode 100644
index 0000000..18fe54b
--- /dev/null
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -0,0 +1,255 @@
+/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/head.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us3_driver;
+
+struct us3_freq_percpu_info {
+	struct cpufreq_frequency_table table[4];
+};
+
+/* Indexed by cpu number. */
+static struct us3_freq_percpu_info *us3_freq_table;
+
+/* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
+ * in the Safari config register.
+ */
+#define SAFARI_CFG_DIV_1	0x0000000000000000UL
+#define SAFARI_CFG_DIV_2	0x0000000040000000UL
+#define SAFARI_CFG_DIV_32	0x0000000080000000UL
+#define SAFARI_CFG_DIV_MASK	0x00000000C0000000UL
+
+/* Read the Safari configuration register via ldxa with
+ * ASI_SAFARI_CONFIG (address %g0).
+ */
+static unsigned long read_safari_cfg(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%%g0] %1, %0"
+			     : "=&r" (ret)
+			     : "i" (ASI_SAFARI_CONFIG));
+	return ret;
+}
+
+/* Write @val to the Safari configuration register, ordered by
+ * membar #Sync.
+ */
+static void write_safari_cfg(unsigned long val)
+{
+	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
+			     : "memory");
+}
+
+/* Return @cpu's current operating frequency as implied by the divider
+ * field of @safari_cfg.  An unknown divider encoding is a bug.
+ */
+static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
+{
+	unsigned long base = sparc64_get_clock_tick(cpu);
+
+	switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
+	case SAFARI_CFG_DIV_1:
+		return base / 1;
+	case SAFARI_CFG_DIV_2:
+		return base / 2;
+	case SAFARI_CFG_DIV_32:
+		return base / 32;
+	default:
+		BUG();
+	}
+
+	return 0; /* not reached */
+}
+
+/* Program the Safari clock divider on @cpu as selected by @index
+ * (0 -> /1, 1 -> /2, 2 -> /32), issuing cpufreq PRE/POSTCHANGE
+ * notifications around the register update.  The calling task is
+ * temporarily bound to @cpu so the register access hits the right
+ * processor; its original affinity is restored afterwards.
+ */
+static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+{
+	static const unsigned long safari_bits[3] = {
+		SAFARI_CFG_DIV_1, SAFARI_CFG_DIV_2, SAFARI_CFG_DIV_32,
+	};
+	static const unsigned int divisors[3] = { 1, 2, 32 };
+	unsigned long new_bits, new_freq, reg;
+	cpumask_t saved_mask;
+	struct cpufreq_freqs freqs;
+
+	if (!cpu_online(cpu))
+		return;
+
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	if (index >= 3)
+		BUG();
+	new_bits = safari_bits[index];
+	new_freq = sparc64_get_clock_tick(cpu) / divisors[index];
+
+	reg = read_safari_cfg();
+
+	freqs.old = get_current_freq(cpu, reg);
+	freqs.new = new_freq;
+	freqs.cpu = cpu;
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	reg = (reg & ~SAFARI_CFG_DIV_MASK) | new_bits;
+	write_safari_cfg(reg);
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	set_cpus_allowed(current, saved_mask);
+}
+
+/* cpufreq "target" hook: look up the table entry matching @target_freq
+ * under @relation and program the corresponding Safari divider.
+ * Returns -EINVAL when no table entry satisfies the request.
+ */
+static int us3_freq_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	struct cpufreq_frequency_table *table =
+		&us3_freq_table[policy->cpu].table[0];
+	unsigned int idx = 0;
+
+	if (cpufreq_frequency_table_target(policy, table, target_freq,
+					   relation, &idx))
+		return -EINVAL;
+
+	us3_set_cpu_divider_index(policy->cpu, idx);
+	return 0;
+}
+
+/* cpufreq "verify" hook: clamp the policy limits against this CPU's
+ * frequency table.
+ */
+static int us3_freq_verify(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *table =
+		&us3_freq_table[policy->cpu].table[0];
+
+	return cpufreq_frequency_table_verify(policy, table);
+}
+
+/* cpufreq per-CPU init: publish this CPU's three operating points
+ * (full speed, half speed, 1/32 speed) and hand the table to the
+ * cpufreq core.
+ */
+static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+{
+	static const unsigned int divisors[3] = { 1, 2, 32 };
+	unsigned int cpu = policy->cpu;
+	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+	struct cpufreq_frequency_table *table =
+		&us3_freq_table[cpu].table[0];
+	unsigned int i;
+
+	for (i = 0; i < 3; i++) {
+		table[i].index = i;
+		table[i].frequency = clock_tick / divisors[i];
+	}
+	table[3].index = 0;
+	table[3].frequency = CPUFREQ_TABLE_END;
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	policy->cpuinfo.transition_latency = 0;
+	policy->cur = clock_tick;
+
+	return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+/* cpufreq per-CPU teardown: return to full speed (divider index 0)
+ * before the policy goes away, but only if the driver registered.
+ */
+static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+	if (cpufreq_us3_driver != NULL)
+		us3_set_cpu_divider_index(policy->cpu, 0);
+
+	return 0;
+}
+
+/* Module init: probe for an UltraSPARC-III or III+ (Cheetah /
+ * Cheetah-Plus implementation codes in %ver); if found, allocate and
+ * register the cpufreq driver and the per-CPU frequency tables.
+ *
+ * Fix: "driver->owner = THIS_MODULE," ended the statement with the
+ * comma operator rather than a semicolon; harmless by accident, but
+ * clearly a typo.  The error path is also flattened — kfree(NULL) is
+ * a no-op, so the guards were redundant.
+ */
+static int __init us3_freq_init(void)
+{
+	unsigned long manuf, impl, ver;
+	int ret;
+
+	/* Identify the CPU from the privileged %ver register. */
+	__asm__("rdpr %%ver, %0" : "=r" (ver));
+	manuf = ((ver >> 48) & 0xffff);
+	impl  = ((ver >> 32) & 0xffff);
+
+	if (manuf == CHEETAH_MANUF &&
+	    (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
+		struct cpufreq_driver *driver;
+
+		ret = -ENOMEM;
+		driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		if (!driver)
+			goto err_out;
+		memset(driver, 0, sizeof(*driver));
+
+		us3_freq_table = kmalloc(
+			(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+			GFP_KERNEL);
+		if (!us3_freq_table)
+			goto err_out;
+
+		memset(us3_freq_table, 0,
+		       (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
+
+		driver->verify = us3_freq_verify;
+		driver->target = us3_freq_target;
+		driver->init = us3_freq_cpu_init;
+		driver->exit = us3_freq_cpu_exit;
+		driver->owner = THIS_MODULE;
+		strcpy(driver->name, "UltraSPARC-III");
+
+		cpufreq_us3_driver = driver;
+		ret = cpufreq_register_driver(driver);
+		if (ret)
+			goto err_out;
+
+		return 0;
+
+err_out:
+		/* Both pointers are NULL until assigned above, and
+		 * kfree(NULL) is a no-op, so no guards are needed.
+		 */
+		kfree(driver);
+		cpufreq_us3_driver = NULL;
+		kfree(us3_freq_table);
+		us3_freq_table = NULL;
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+/* Module exit: unregister the driver and release everything allocated
+ * at init time.  A NULL driver pointer means the probe never matched.
+ */
+static void __exit us3_freq_exit(void)
+{
+	if (!cpufreq_us3_driver)
+		return;
+
+	cpufreq_unregister_driver(cpufreq_us3_driver);
+	kfree(cpufreq_us3_driver);
+	cpufreq_us3_driver = NULL;
+	kfree(us3_freq_table);
+	us3_freq_table = NULL;
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
+MODULE_LICENSE("GPL");
+
+module_init(us3_freq_init);
+module_exit(us3_freq_exit);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..382fd67
--- /dev/null
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -0,0 +1,106 @@
+/* ld script to make UltraLinux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
+OUTPUT_ARCH(sparc:v9a)
+ENTRY(_start)
+
+/* 'jiffies' is an alias for the 64-bit jiffies_64 counter. */
+jiffies = jiffies_64;
+SECTIONS
+{
+  /* Fixed link-time addresses for the early page tables.
+   * NOTE(review): these must agree with the boot code's assumptions —
+   * confirm against head.S.
+   */
+  swapper_pmd_dir = 0x0000000000402000;
+  empty_pg_dir = 0x0000000000403000;
+  . = 0x4000;
+  /* Kernel text is linked at 0x404000; gaps are filled with 0. */
+  .text 0x0000000000404000 :
+  {
+    *(.text)
+    SCHED_TEXT
+    LOCK_TEXT
+    *(.gnu.warning)
+  } =0
+  _etext = .;
+  PROVIDE (etext = .);
+
+  RODATA
+
+  .data    :
+  {
+    *(.data)
+    CONSTRUCTORS
+  }
+  .data1   : { *(.data1) }
+  . = ALIGN(64);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  _edata  =  .;
+  PROVIDE (edata = .);
+  .fixup   : { *(.fixup) }
+
+  /* Exception table, bracketed by start/stop symbols for the
+   * search code.
+   */
+  . = ALIGN(16);
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  /* Boot-time-only code and data lives between __init_begin and
+   * __init_end (presumably released after boot — standard kernel
+   * practice).
+   */
+  . = ALIGN(8192);
+  __init_begin = .;
+  .init.text : { 
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  /* Initcalls are emitted level 1 through 7 so they run in
+   * priority order.
+   */
+  __initcall_start = .;
+  .initcall.init : {
+	*(.initcall1.init) 
+	*(.initcall2.init) 
+	*(.initcall3.init) 
+	*(.initcall4.init) 
+	*(.initcall5.init) 
+	*(.initcall6.init) 
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(8192); 
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+  /* Per-CPU data, page-aligned and bracketed for the percpu
+   * allocator.
+   */
+  . = ALIGN(8192);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(8192);
+  __init_end = .;
+  __bss_start = .;
+  .sbss      : { *(.sbss) *(.scommon) }
+  .bss       :
+  {
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  _end = . ;
+  PROVIDE (end = .);
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  .debug          0 : { *(.debug) }
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  .line           0 : { *(.line) }
+  /* Exit-only code/data is never used in a monolithic image. */
+  /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
+}
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
new file mode 100644
index 0000000..dfbc7e0
--- /dev/null
+++ b/arch/sparc64/kernel/winfixup.S
@@ -0,0 +1,417 @@
+/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $
+ *
+ * winfixup.S: Handle cases where user stack pointer is found to be bogus.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/asi.h>
+#include <asm/head.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/spitfire.h>
+#include <asm/thread_info.h>
+
+	.text
+
+	/* Load a context value into the MMU's PRIMARY_CONTEXT register
+	 * and flush.  The two "sethi %hi(0)" instructions are patch
+	 * sites: on Cheetah-Plus, cheetah_plus_patch_winfixup() (end of
+	 * this file) overwrites them with the instructions stored at
+	 * cplus_wfinsn_1/cplus_wfinsn_2 below, so the patched routine
+	 * builds the Cheetah-Plus nucleus/context-0 value instead of 0.
+	 * Clobbers %l1, %g1, %g2.
+	 */
+set_pcontext:
+cplus_winfixup_insn_1:
+	sethi	%hi(0), %l1
+	mov	PRIMARY_CONTEXT, %g1
+	sllx	%l1, 32, %l1
+cplus_winfixup_insn_2:
+	sethi	%hi(0), %g2
+	or	%l1, %g2, %l1
+	stxa	%l1, [%g1] ASI_DMMU
+	flush	%g6
+	retl
+	 nop
+
+	/* Replacement instructions copied over the patch sites above on
+	 * Cheetah-Plus; never executed in place.
+	 */
+cplus_wfinsn_1:
+	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %l1
+cplus_wfinsn_2:
+	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+	.align	32
+
+	/* Here are the rules, pay attention.
+	 *
+	 * The kernel is disallowed from touching user space while
+	 * the trap level is greater than zero, except for from within
+	 * the window spill/fill handlers.  This must be followed
+	 * so that we can easily detect the case where we tried to
+	 * spill/fill with a bogus (or unmapped) user stack pointer.
+	 *
+	 * These are layed out in a special way for cache reasons,
+	 * don't touch...
+	 */
+	.globl	fill_fixup, spill_fixup
+	/* Fill trap hit a bad user stack pointer.  If the trap came
+	 * from user mode, tag the fault code with FAULT_CODE_WINFIXUP
+	 * and take the common user path; otherwise unwind the tl=1
+	 * state by hand (see the long comment below).
+	 */
+fill_fixup:
+	rdpr		%tstate, %g1
+	andcc		%g1, TSTATE_PRIV, %g0
+	or		%g4, FAULT_CODE_WINFIXUP, %g4
+	be,pt		%xcc, window_scheisse_from_user_common
+	 and		%g1, TSTATE_CWP, %g1
+
+	/* This is the extremely complex case, but it does happen from
+	 * time to time if things are just right.  Essentially the restore
+	 * done in rtrap right before going back to user mode, with tl=1
+	 * and that levels trap stack registers all setup, took a fill trap,
+	 * the user stack was not mapped in the tlb, and tlb miss occurred,
+	 * the pte found was not valid, and a simple ref bit watch update
+	 * could not satisfy the miss, so we got here.
+	 *
+	 * We must carefully unwind the state so we get back to tl=0, preserve
+	 * all the register values we were going to give to the user.  Luckily
+	 * most things are where they need to be, we also have the address
+	 * which triggered the fault handy as well.
+	 *
+	 * Also note that we must preserve %l5 and %l6.  If the user was
+	 * returning from a system call, we must make it look this way
+	 * after we process the fill fault on the users stack.
+	 *
+	 * First, get into the window where the original restore was executed.
+	 */
+
+	rdpr		%wstate, %g2			! Grab user mode wstate.
+	wrpr		%g1, %cwp			! Get into the right window.
+	sll		%g2, 3, %g2			! NORMAL-->OTHER
+
+	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
+	wrpr		%g2, 0x0, %wstate		! This must be consistent.
+	wrpr		%g0, 0x0, %otherwin		! We know this.
+	call		set_pcontext			! Change contexts...
+	 nop
+	rdpr		%pstate, %l1			! Prepare to change globals.
+	mov		%g6, %o7			! Get current.
+
+	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
+	stb		%g4, [%g6 + TI_FAULT_CODE]
+	stx		%g5, [%g6 + TI_FAULT_ADDR]
+	wrpr		%g0, 0x0, %tl			! Out of trap levels.
+	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+	mov		%o7, %g6
+	ldx		[%g6 + TI_TASK], %g4
+#ifdef CONFIG_SMP
+	mov		TSB_REG, %g1
+	ldxa		[%g1] ASI_IMMU, %g5
+#endif
+
+	/* This is the same as below, except we handle this a bit special
+	 * since we must preserve %l5 and %l6, see comment above.
+	 */
+	call		do_sparc64_fault
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 nop						! yes, nop is correct
+
+	/* Be very careful about usage of the alternate globals here.
+	 * You cannot touch %g4/%g5 as that has the fault information
+	 * should this be from usermode.  Also be careful for the case
+	 * where we get here from the save instruction in etrap.S when
+	 * coming from either user or kernel (does not matter which, it
+	 * is the same problem in both cases).  Essentially this means
+	 * do not touch %g7 or %g2 so we handle the two cases fine.
+	 */
+	/* Spill trap hit a bad user stack pointer.  Save the spilling
+	 * window into the thread_info save area instead: record %sp in
+	 * TI_RWIN_SPTRS[wsaved], store the window's locals/ins into
+	 * TI_REG_WINDOW[wsaved] (64-bit stx layout, or packed 32-bit
+	 * stw layout when _TIF_32BIT is set), bump TI_WSAVED, and
+	 * "saved" the window.  Kernel-mode traps just retry; user-mode
+	 * traps fall into the common fault path.
+	 */
+spill_fixup:
+	ldx		[%g6 + TI_FLAGS], %g1
+	andcc		%g1, _TIF_32BIT, %g0
+	ldub		[%g6 + TI_WSAVED], %g1
+
+	sll		%g1, 3, %g3
+	add		%g6, %g3, %g3
+	stx		%sp, [%g3 + TI_RWIN_SPTRS]
+	sll		%g1, 7, %g3
+	bne,pt		%xcc, 1f
+	 add		%g6, %g3, %g3
+	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
+
+	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
+	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
+	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
+	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
+	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
+	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
+	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
+	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
+
+	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
+	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
+	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
+	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
+	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
+	b,pt		%xcc, 2f
+	 stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
+1:	stw		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+
+	stw		%l1, [%g3 + TI_REG_WINDOW + 0x04]
+	stw		%l2, [%g3 + TI_REG_WINDOW + 0x08]
+	stw		%l3, [%g3 + TI_REG_WINDOW + 0x0c]
+	stw		%l4, [%g3 + TI_REG_WINDOW + 0x10]
+	stw		%l5, [%g3 + TI_REG_WINDOW + 0x14]
+	stw		%l6, [%g3 + TI_REG_WINDOW + 0x18]
+	stw		%l7, [%g3 + TI_REG_WINDOW + 0x1c]
+	stw		%i0, [%g3 + TI_REG_WINDOW + 0x20]
+
+	stw		%i1, [%g3 + TI_REG_WINDOW + 0x24]
+	stw		%i2, [%g3 + TI_REG_WINDOW + 0x28]
+	stw		%i3, [%g3 + TI_REG_WINDOW + 0x2c]
+	stw		%i4, [%g3 + TI_REG_WINDOW + 0x30]
+	stw		%i5, [%g3 + TI_REG_WINDOW + 0x34]
+	stw		%i6, [%g3 + TI_REG_WINDOW + 0x38]
+	stw		%i7, [%g3 + TI_REG_WINDOW + 0x3c]
+2:	add		%g1, 1, %g1
+
+	stb		%g1, [%g6 + TI_WSAVED]
+	rdpr		%tstate, %g1
+	andcc		%g1, TSTATE_PRIV, %g0
+	saved
+	be,pn		%xcc, window_scheisse_from_user_common
+	 mov		FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
+	retry
+
+	/* Common user-mode path for bad-stack spill/fill: record the
+	 * fault code (%g4) and address (%g5) in thread_info, restore
+	 * the original CWP, etrap down to a full trap frame, call
+	 * do_sparc64_fault() on it, and leave via rtrap_clr_l6.
+	 */
+window_scheisse_from_user_common:
+	stb		%g4, [%g6 + TI_FAULT_CODE]
+	stx		%g5, [%g6 + TI_FAULT_ADDR]
+	wrpr		%g1, %cwp
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	call		do_sparc64_fault
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_clr_l6
+
+	.globl		winfix_mna, fill_fixup_mna, spill_fixup_mna
+	/* Retarget %tnpc to offset 0x78 within the current 0x80-byte
+	 * trap-vector group (%g3 rounded down), i.e. the mem-address-
+	 * not-aligned fixup slot, then resume there with "done".
+	 */
+winfix_mna:
+	andn		%g3, 0x7f, %g3
+	add		%g3, 0x78, %g3
+	wrpr		%g3, %tnpc
+	done
+	/* Fill trap took an unaligned user stack pointer.  Same unwind
+	 * as fill_fixup, but the final call is mem_address_unaligned()
+	 * with the fault info (%g5/%g4) as arguments.
+	 */
+fill_fixup_mna:
+	rdpr		%tstate, %g1
+	andcc		%g1, TSTATE_PRIV, %g0
+	be,pt		%xcc, window_mna_from_user_common
+	 and		%g1, TSTATE_CWP, %g1
+
+	/* Please, see fill_fixup commentary about why we must preserve
+	 * %l5 and %l6 to preserve absolute correct semantics.
+	 */
+	rdpr		%wstate, %g2			! Grab user mode wstate.
+	wrpr		%g1, %cwp			! Get into the right window.
+	sll		%g2, 3, %g2			! NORMAL-->OTHER
+	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
+
+	wrpr		%g2, 0x0, %wstate		! This must be consistent.
+	wrpr		%g0, 0x0, %otherwin		! We know this.
+	call		set_pcontext			! Change contexts...
+	 nop
+	rdpr		%pstate, %l1			! Prepare to change globals.
+	mov		%g4, %o2			! Setup args for
+	mov		%g5, %o1			! final call to mem_address_unaligned.
+	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
+
+	mov		%g6, %o7			! Stash away current.
+	wrpr		%g0, 0x0, %tl			! Out of trap levels.
+	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+	mov		%o7, %g6			! Get current back.
+	ldx		[%g6 + TI_TASK], %g4		! Finish it.
+#ifdef CONFIG_SMP
+	mov		TSB_REG, %g1
+	ldxa		[%g1] ASI_IMMU, %g5
+#endif
+	call		mem_address_unaligned
+	 add		%sp, PTREGS_OFF, %o0
+
+	b,pt		%xcc, rtrap
+	 nop						! yes, the nop is correct
+	/* Spill trap took an unaligned user stack pointer.  Same window
+	 * save into thread_info as spill_fixup (64-bit stx layout, or
+	 * std pairs for 32-bit tasks), then retry from kernel mode or
+	 * fall into the common mem_address_unaligned() path from user
+	 * mode.
+	 */
+spill_fixup_mna:
+	ldx		[%g6 + TI_FLAGS], %g1
+	andcc		%g1, _TIF_32BIT, %g0
+	ldub		[%g6 + TI_WSAVED], %g1
+	sll		%g1, 3, %g3
+	add		%g6, %g3, %g3
+	stx		%sp, [%g3 + TI_RWIN_SPTRS]
+
+	sll		%g1, 7, %g3
+	bne,pt		%xcc, 1f
+	 add		%g6, %g3, %g3
+	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
+	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
+	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
+	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
+
+	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
+	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
+	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
+	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
+	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
+	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
+	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
+	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
+
+	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
+	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
+	stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
+	b,pt		%xcc, 2f
+	 add		%g1, 1, %g1
+1:	std		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	std		%l2, [%g3 + TI_REG_WINDOW + 0x08]
+	std		%l4, [%g3 + TI_REG_WINDOW + 0x10]
+
+	std		%l6, [%g3 + TI_REG_WINDOW + 0x18]
+	std		%i0, [%g3 + TI_REG_WINDOW + 0x20]
+	std		%i2, [%g3 + TI_REG_WINDOW + 0x28]
+	std		%i4, [%g3 + TI_REG_WINDOW + 0x30]
+	std		%i6, [%g3 + TI_REG_WINDOW + 0x38]
+	add		%g1, 1, %g1
+2:	stb		%g1, [%g6 + TI_WSAVED]
+	rdpr		%tstate, %g1
+
+	andcc		%g1, TSTATE_PRIV, %g0
+	saved
+	be,pn		%xcc, window_mna_from_user_common
+	 and		%g1, TSTATE_CWP, %g1
+	retry
+	/* Common user-mode unaligned path: etrap to a trap frame, call
+	 * mem_address_unaligned(regs, addr=%l5, fault=%l4) and return
+	 * via rtrap with %l6 cleared.
+	 */
+window_mna_from_user_common:
+	wrpr		%g1, %cwp
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o2
+	mov		%l5, %o1
+	call		mem_address_unaligned
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+	
+	/* These are only needed for 64-bit mode processes which
+	 * put their stack pointer into the VPTE area and there
+	 * happens to be a VPTE tlb entry mapped there during
+	 * a spill/fill trap to that stack frame.
+	 */
+	.globl		winfix_dax, fill_fixup_dax, spill_fixup_dax
+	/* Retarget %tnpc to offset 0x74 within the current 0x80-byte
+	 * trap-vector group, i.e. the data-access-exception fixup slot,
+	 * then resume there with "done".
+	 */
+winfix_dax:
+	andn		%g3, 0x7f, %g3
+	add		%g3, 0x74, %g3
+	wrpr		%g3, %tnpc
+	done
+	/* Fill trap raised a data access exception.  Same unwind as
+	 * fill_fixup, but the final call is data_access_exception()
+	 * (note the %o1/%o2 argument order differs from the mna case).
+	 */
+fill_fixup_dax:
+	rdpr		%tstate, %g1
+	andcc		%g1, TSTATE_PRIV, %g0
+	be,pt		%xcc, window_dax_from_user_common
+	 and		%g1, TSTATE_CWP, %g1
+
+	/* Please, see fill_fixup commentary about why we must preserve
+	 * %l5 and %l6 to preserve absolute correct semantics.
+	 */
+	rdpr		%wstate, %g2			! Grab user mode wstate.
+	wrpr		%g1, %cwp			! Get into the right window.
+	sll		%g2, 3, %g2			! NORMAL-->OTHER
+	wrpr		%g0, 0x0, %canrestore		! Standard etrap stuff.
+
+	wrpr		%g2, 0x0, %wstate		! This must be consistent.
+	wrpr		%g0, 0x0, %otherwin		! We know this.
+	call		set_pcontext			! Change contexts...
+	 nop
+	rdpr		%pstate, %l1			! Prepare to change globals.
+	mov		%g4, %o1			! Setup args for
+	mov		%g5, %o2			! final call to data_access_exception.
+	andn		%l1, PSTATE_MM, %l1		! We want to be in RMO
+
+	mov		%g6, %o7			! Stash away current.
+	wrpr		%g0, 0x0, %tl			! Out of trap levels.
+	wrpr		%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+	mov		%o7, %g6			! Get current back.
+	ldx		[%g6 + TI_TASK], %g4		! Finish it.
+#ifdef CONFIG_SMP
+	mov		TSB_REG, %g1
+	ldxa		[%g1] ASI_IMMU, %g5
+#endif
+	call		data_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+
+	b,pt		%xcc, rtrap
+	 nop						! yes, the nop is correct
+	/* Spill trap raised a data access exception.  Same window save
+	 * into thread_info as spill_fixup_mna; kernel mode retries,
+	 * user mode takes the data_access_exception() path below.
+	 */
+spill_fixup_dax:
+	ldx		[%g6 + TI_FLAGS], %g1
+	andcc		%g1, _TIF_32BIT, %g0
+	ldub		[%g6 + TI_WSAVED], %g1
+	sll		%g1, 3, %g3
+	add		%g6, %g3, %g3
+	stx		%sp, [%g3 + TI_RWIN_SPTRS]
+
+	sll		%g1, 7, %g3
+	bne,pt		%xcc, 1f
+	 add		%g6, %g3, %g3
+	stx		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	stx		%l1, [%g3 + TI_REG_WINDOW + 0x08]
+	stx		%l2, [%g3 + TI_REG_WINDOW + 0x10]
+	stx		%l3, [%g3 + TI_REG_WINDOW + 0x18]
+	stx		%l4, [%g3 + TI_REG_WINDOW + 0x20]
+
+	stx		%l5, [%g3 + TI_REG_WINDOW + 0x28]
+	stx		%l6, [%g3 + TI_REG_WINDOW + 0x30]
+	stx		%l7, [%g3 + TI_REG_WINDOW + 0x38]
+	stx		%i0, [%g3 + TI_REG_WINDOW + 0x40]
+	stx		%i1, [%g3 + TI_REG_WINDOW + 0x48]
+	stx		%i2, [%g3 + TI_REG_WINDOW + 0x50]
+	stx		%i3, [%g3 + TI_REG_WINDOW + 0x58]
+	stx		%i4, [%g3 + TI_REG_WINDOW + 0x60]
+
+	stx		%i5, [%g3 + TI_REG_WINDOW + 0x68]
+	stx		%i6, [%g3 + TI_REG_WINDOW + 0x70]
+	stx		%i7, [%g3 + TI_REG_WINDOW + 0x78]
+	b,pt		%xcc, 2f
+	 add		%g1, 1, %g1
+1:	std		%l0, [%g3 + TI_REG_WINDOW + 0x00]
+	std		%l2, [%g3 + TI_REG_WINDOW + 0x08]
+	std		%l4, [%g3 + TI_REG_WINDOW + 0x10]
+
+	std		%l6, [%g3 + TI_REG_WINDOW + 0x18]
+	std		%i0, [%g3 + TI_REG_WINDOW + 0x20]
+	std		%i2, [%g3 + TI_REG_WINDOW + 0x28]
+	std		%i4, [%g3 + TI_REG_WINDOW + 0x30]
+	std		%i6, [%g3 + TI_REG_WINDOW + 0x38]
+	add		%g1, 1, %g1
+2:	stb		%g1, [%g6 + TI_WSAVED]
+	rdpr		%tstate, %g1
+
+	andcc		%g1, TSTATE_PRIV, %g0
+	saved
+	be,pn		%xcc, window_dax_from_user_common
+	 and		%g1, TSTATE_CWP, %g1
+	retry
+	/* Common user-mode path: etrap to a trap frame, call
+	 * data_access_exception(regs, sfsr=%l4, sfar=%l5) and return
+	 * via rtrap with %l6 cleared.
+	 */
+window_dax_from_user_common:
+	wrpr		%g1, %cwp
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		data_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+	
+
+	.globl		cheetah_plus_patch_winfixup
+	/* Boot-time patcher for Cheetah-Plus: copy the instruction words
+	 * stored at cplus_wfinsn_1/cplus_wfinsn_2 over the two sethi
+	 * patch sites inside set_pcontext (cplus_winfixup_insn_1/2),
+	 * flushing the I-cache line after each store.
+	 */
+cheetah_plus_patch_winfixup:
+	sethi			%hi(cplus_wfinsn_1), %o0
+	sethi			%hi(cplus_winfixup_insn_1), %o2
+	lduw			[%o0 + %lo(cplus_wfinsn_1)], %o1
+	or			%o2, %lo(cplus_winfixup_insn_1), %o2
+	stw			%o1, [%o2]
+	flush			%o2
+
+	sethi			%hi(cplus_wfinsn_2), %o0
+	sethi			%hi(cplus_winfixup_insn_2), %o2
+	lduw			[%o0 + %lo(cplus_wfinsn_2)], %o1
+	or			%o2, %lo(cplus_winfixup_insn_2), %o2
+	stw			%o1, [%o2]
+	flush			%o2
+
+	retl
+	 nop
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
new file mode 100644
index 0000000..40dbeec
--- /dev/null
+++ b/arch/sparc64/lib/Makefile
@@ -0,0 +1,20 @@
+# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
+# Makefile for Sparc64 library files..
+#
+
+# Assembler sources are preprocessed with -ansi; all C in this
+# directory must compile warning-clean (-Werror).
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+# Objects placed in lib.a: string/memory primitives, checksum helpers,
+# user-space copy variants (U1* for UltraSPARC-I/II, U3* for
+# UltraSPARC-III, plus U3patch which redirects the generic entry
+# points at boot), and VIS FPU state save support.
+lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
+	 memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
+	 bzero.o csum_copy.o csum_copy_from_user.o csum_copy_to_user.o \
+	 VISsave.o atomic.o bitops.o \
+	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
+	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
+	 copy_in_user.o user_fixup.o memmove.o \
+	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+
+# Optional debugging / locking helpers, selected by config.
+lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
+lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
+
+# Always built into the kernel proper (not the library).
+obj-y += iomap.o
diff --git a/arch/sparc64/lib/PeeCeeI.c b/arch/sparc64/lib/PeeCeeI.c
new file mode 100644
index 0000000..3008d53
--- /dev/null
+++ b/arch/sparc64/lib/PeeCeeI.c
@@ -0,0 +1,237 @@
+/* $Id: PeeCeeI.c,v 1.4 1999/09/06 01:17:35 davem Exp $
+ * PeeCeeI.c: The emerging standard...
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+/* Write 'count' bytes from 'src' to I/O port 'addr'.
+ * Byte accesses have no alignment constraints, so a plain
+ * one-byte-at-a-time loop is all that is needed.
+ */
+void outsb(void __iomem *addr, const void *src, unsigned long count)
+{
+	const u8 *byte = src;
+
+	for (; count != 0; count--)
+		outb(*byte++, addr);
+}
+
+/* Write 'count' 16-bit values from 'src' to port 'addr'.
+ * To minimize memory traffic the source is read 32 bits at a time
+ * once it is 4-byte aligned, and each word is emitted as two 16-bit
+ * port writes.  le16/le32_to_cpup byte-swap the in-memory values on
+ * this big-endian CPU; presumably this pairs with a swapping outw so
+ * the byte stream on the (little-endian PCI) bus is preserved --
+ * NOTE(review): confirm against the outw implementation.
+ * 'src' need only be 2-byte aligned.
+ */
+void outsw(void __iomem *addr, const void *src, unsigned long count)
+{
+	if(count) {
+		u16 *ps = (u16 *)src;
+		u32 *pi;
+
+		/* Leading halfword if src is not 4-byte aligned. */
+		if(((u64)src) & 0x2) {
+			u16 val = le16_to_cpup(ps);
+			outw(val, addr);
+			ps++;
+			count--;
+		}
+		/* Main loop: one aligned 32-bit load feeds two outw's.
+		 * After the swap, bits 15:0 hold the first halfword of
+		 * the pair and bits 31:16 the second.
+		 */
+		pi = (u32 *)ps;
+		while(count >= 2) {
+			u32 w = le32_to_cpup(pi);
+
+			pi++;
+			outw(w >> 0, addr);
+			outw(w >> 16, addr);
+			count -= 2;
+		}
+		/* At most one trailing halfword remains. */
+		ps = (u16 *)pi;
+		if(count) {
+			u16 val = le16_to_cpup(ps);
+			outw(val, addr);
+		}
+	}
+}
+
+/* Write 'count' 32-bit values from 'src' to port 'addr'.
+ * 'src' may be arbitrarily aligned.  For the misaligned cases each
+ * output word is assembled from the tail of the previous 32-bit load
+ * (carried in 'l') and the head of the next one ('l2'), so the source
+ * is still read with aligned 16/32-bit accesses only.
+ * cpu_to_le16p/cpu_to_le32p byte-swap on this big-endian CPU;
+ * NOTE(review): the swap direction is assumed to cancel against
+ * outl's own swizzling -- confirm against the outl implementation.
+ */
+void outsl(void __iomem *addr, const void *src, unsigned long count)
+{
+	if(count) {
+		if((((u64)src) & 0x3) == 0) {
+			/* Fast path: fully aligned source. */
+			u32 *p = (u32 *)src;
+			while(count--) {
+				u32 val = cpu_to_le32p(p);
+				outl(val, addr);
+				p++;
+			}
+		} else {
+			u8 *pb;
+			u16 *ps = (u16 *)src;
+			u32 l = 0, l2;
+			u32 *pi;
+
+			/* One output word is peeled off in each case
+			 * (count -= 1): its pieces straddle the first
+			 * and last partial source accesses.
+			 */
+			switch(((u64)src) & 0x3) {
+			case 0x2:
+				/* Halfword-aligned: lead-in halfword,
+				 * then aligned words, then a trailing
+				 * halfword completes the final output.
+				 */
+				count -= 1;
+				l = cpu_to_le16p(ps) << 16;
+				ps++;
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 16) | (l2 << 16)), addr);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				l2 = cpu_to_le16p(ps);
+				outl(((l >> 16) | (l2 << 16)), addr);
+				break;
+
+			case 0x1:
+				/* Off by one byte: lead-in is one byte
+				 * plus one halfword; trailing byte
+				 * completes the final output word.
+				 */
+				count -= 1;
+				pb = (u8 *)src;
+				l = (*pb++ << 8);
+				ps = (u16 *)pb;
+				l2 = cpu_to_le16p(ps);
+				ps++;
+				l |= (l2 << 16);
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 8) | (l2 << 24)), addr);
+					l = l2;
+				}
+				pb = (u8 *)pi;
+				outl(((l >> 8) | (*pb << 24)), addr);
+				break;
+
+			case 0x3:
+				/* Off by three bytes: lead-in is a
+				 * single byte; trailing halfword plus
+				 * byte complete the final output word.
+				 */
+				count -= 1;
+				pb = (u8 *)src;
+				l = (*pb++ << 24);
+				pi = (u32 *)pb;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 24) | (l2 << 8)), addr);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				l2 = cpu_to_le16p(ps);
+				ps++;
+				pb = (u8 *)ps;
+				l2 |= (*pb << 16);
+				outl(((l >> 24) | (l2 << 8)), addr);
+				break;
+			}
+		}
+	}
+}
+
+/* Read 'count' bytes from port 'addr' into 'dst'.
+ * The destination is first aligned to a 32-bit boundary byte-by-byte,
+ * then filled a word at a time (four port reads packed MSB-first,
+ * which on this big-endian CPU stores the bytes in arrival order),
+ * with any remainder copied byte-by-byte.
+ */
+void insb(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		u32 *pi;
+		u8 *pb = dst;
+
+		/* Test 'count' before decrementing it.  The previous
+		 * form, while((...& 0x3) && count--), wrapped 'count'
+		 * to ULONG_MAX when the byte count was exhausted while
+		 * 'pb' was still misaligned (e.g. count==1 with
+		 * dst & 3 == 1), making the word loop below read the
+		 * port forever and overrun the destination buffer.
+		 */
+		while(count && (((unsigned long)pb) & 0x3)) {
+			*pb++ = inb(addr);
+			count--;
+		}
+		pi = (u32 *)pb;
+		while(count >= 4) {
+			u32 w;
+
+			/* Pack four bytes, first byte in the MSB. */
+			w  = (inb(addr) << 24);
+			w |= (inb(addr) << 16);
+			w |= (inb(addr) << 8);
+			w |= (inb(addr) << 0);
+			*pi++ = w;
+			count -= 4;
+		}
+		/* Trailing 0-3 bytes. */
+		pb = (u8 *)pi;
+		while(count--)
+			*pb++ = inb(addr);
+	}
+}
+
+/* Read 'count' 16-bit values from port 'addr' into 'dst'.
+ * Pairs of port reads are merged into one aligned 32-bit store once
+ * the destination is 4-byte aligned.  le16_to_cpu byte-swaps each
+ * halfword on this big-endian CPU; presumably this cancels inw's own
+ * swizzling so the bus byte stream lands in memory order --
+ * NOTE(review): confirm against the inw implementation.
+ * 'dst' need only be 2-byte aligned.
+ */
+void insw(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		u16 *ps = dst;
+		u32 *pi;
+
+		/* Leading halfword if dst is not 4-byte aligned.
+		 * count >= 1 is guaranteed by the guard above, so the
+		 * unconditional decrement here cannot underflow.
+		 */
+		if(((unsigned long)ps) & 0x2) {
+			*ps++ = le16_to_cpu(inw(addr));
+			count--;
+		}
+		/* Two port reads per aligned 32-bit store; the first
+		 * halfword of the pair goes in bits 31:16 (big-endian
+		 * store keeps arrival order).
+		 */
+		pi = (u32 *)ps;
+		while(count >= 2) {
+			u32 w;
+
+			w  = (le16_to_cpu(inw(addr)) << 16);
+			w |= (le16_to_cpu(inw(addr)) << 0);
+			*pi++ = w;
+			count -= 2;
+		}
+		/* At most one trailing halfword remains. */
+		ps = (u16 *)pi;
+		if(count)
+			*ps = le16_to_cpu(inw(addr));
+	}
+}
+
+/* Read 'count' 32-bit values from port 'addr' into 'dst'.
+ * 'dst' may be arbitrarily aligned.  For the misaligned cases each
+ * incoming word is split: its leading bytes complete the previous
+ * store and its trailing bytes (carried in 'l') start the next one,
+ * so memory is still written with aligned 8/16/32-bit stores only.
+ * NOTE(review): the byte-lane arithmetic assumes big-endian stores
+ * (first arrived byte in the most significant position) -- verify on
+ * this path before reusing elsewhere.
+ */
+void insl(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		if((((unsigned long)dst) & 0x3) == 0) {
+			/* Fast path: fully aligned destination. */
+			u32 *pi = dst;
+			while(count--)
+				*pi++ = le32_to_cpu(inl(addr));
+		} else {
+			u32 l = 0, l2, *pi;
+			u16 *ps;
+			u8 *pb;
+
+			/* One input word is peeled off in each case
+			 * (count -= 1): its bytes straddle the first
+			 * and last partial stores.
+			 */
+			switch(((unsigned long)dst) & 3) {
+			case 0x2:
+				/* Halfword-aligned destination: store
+				 * the low half of the first word (u16
+				 * truncation), then merged words, then
+				 * the leftover half at the end.
+				 */
+				ps = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*ps++ = l;
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 16) | (l2 >> 16);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps = l;
+				break;
+
+			case 0x1:
+				/* Off by one byte: one byte plus one
+				 * halfword lead in; a single trailing
+				 * byte (u8 truncation) finishes.
+				 */
+				pb = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				ps = (u16 *)pb;
+				*ps++ = ((l >> 8) & 0xffff);
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 24) | (l2 >> 8);
+					l = l2;
+				}
+				pb = (u8 *)pi;
+				*pb = l;
+				break;
+
+			case 0x3:
+				/* Off by three bytes: single-byte lead
+				 * in; trailing halfword plus byte
+				 * finish the final word.
+				 */
+				pb = (u8 *)dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				pi = (u32 *)pb;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 8) | (l2 >> 24);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps++ = ((l >> 8) & 0xffff);
+				pb = (u8 *)ps;
+				*pb = l;
+				break;
+			}
+		}
+	}
+}
+
diff --git a/arch/sparc64/lib/U1copy_from_user.S b/arch/sparc64/lib/U1copy_from_user.S
new file mode 100644
index 0000000..93146a8
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_from_user.S
@@ -0,0 +1,33 @@
+/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+/* EX_LD(x): execute load instruction 'x' under a numbered label and
+ * register an exception-table entry for it.  If the load faults, the
+ * fixup stub returns 1 in %o0 to signal a failed copy.
+ */
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+/* Instantiate the generic U1memcpy body as ___copy_from_user:
+ * all loads go through the secondary address space (%asi for single
+ * accesses, ASI_BLK_AIUS for 64-byte block loads) so they read user
+ * memory; stores remain normal kernel accesses.  On full success the
+ * routine returns 0 (EX_RETVAL).
+ */
+#define FUNC_NAME		___copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS, dest
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1copy_to_user.S b/arch/sparc64/lib/U1copy_to_user.S
new file mode 100644
index 0000000..1fccc52
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_to_user.S
@@ -0,0 +1,33 @@
+/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+/* EX_ST(x): execute store instruction 'x' under a numbered label and
+ * register an exception-table entry for it.  If the store faults, the
+ * fixup stub returns 1 in %o0 to signal a failed copy.
+ */
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+/* Instantiate the generic U1memcpy body as ___copy_to_user:
+ * all stores target the secondary address space (ASI_AIUS for single
+ * accesses, ASI_BLK_AIUS for 64-byte block stores) so they write user
+ * memory; loads remain normal kernel accesses.  On full success the
+ * routine returns 0 (EX_RETVAL).
+ */
+#define FUNC_NAME		___copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1memcpy.S b/arch/sparc64/lib/U1memcpy.S
new file mode 100644
index 0000000..da9b520
--- /dev/null
+++ b/arch/sparc64/lib/U1memcpy.S
@@ -0,0 +1,560 @@
+/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+/* Configuration block.  This file is also buildable outside the
+ * kernel (for testing); in that case VIS FPU save/restore and the
+ * block-transfer ASI constants are provided locally.  Note that
+ * GLOBAL_SPARE here is a bare register name (g7/g5) and is used as
+ * %GLOBAL_SPARE below, unlike U3memcpy.S which defines it with the
+ * '%' included.
+ */
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	g7
+#else
+#define GLOBAL_SPARE	g5
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#endif
+
+/* The EX_*, LOAD*, STORE*, FUNC_NAME and PREAMBLE hooks default to
+ * plain memcpy behavior; the U1copy_{from,to}_user.S wrappers
+ * override them before including this file to add fault handling and
+ * user-space ASIs.
+ */
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef LOAD_BLK
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+/* Align eight successive double registers against the source
+ * misalignment, producing one 64-byte block in %f48-%f62.
+ */
+#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)		\
+	faligndata		%f1, %f2, %f48;			\
+	faligndata		%f2, %f3, %f50;			\
+	faligndata		%f3, %f4, %f52;			\
+	faligndata		%f4, %f5, %f54;			\
+	faligndata		%f5, %f6, %f56;			\
+	faligndata		%f6, %f7, %f58;			\
+	faligndata		%f7, %f8, %f60;			\
+	faligndata		%f8, %f9, %f62;
+
+/* One 64-byte block load + block store step of the main loop;
+ * branches to 'jmptgt' when 'len' reaches zero.
+ */
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
+	EX_LD(LOAD_BLK(%src, %fdest));				\
+	EX_ST(STORE_BLK(%fsrc, %dest));				\
+	add			%src, 0x40, %src;		\
+	subcc			%len, 0x40, %len;		\
+	be,pn			%xcc, jmptgt;			\
+	 add			%dest, 0x40, %dest;		\
+
+/* Three-stage rotation of the %f0/%f16/%f32 load buffers. */
+#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
+#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
+#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+
+/* Final block store of a loop exit path; the caller supplies the
+ * trailing 'membar #Sync' (it becomes part of the macro expansion's
+ * instruction stream, and for STORE_JUMP the branch's delay slot).
+ */
+#define STORE_SYNC(dest, fsrc)				\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;
+
+#define STORE_JUMP(dest, fsrc, target)			\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;	\
+	ba,pt			%xcc, target;
+
+/* Drain one aligned doubleword of the remaining 'left' bytes, falling
+ * through to label 95 when fewer than 8 remain.
+ */
+#define FINISH_VISCHUNK(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;\
+	bl,pn			%xcc, 95f;	\
+	 faligndata		%f0, %f1, %f48;	\
+	EX_ST(STORE(std, %f48, %dest));		\
+	add			%dest, 8, %dest;
+
+/* Variants for the wrap-around chunk where only a register move is
+ * needed before continuing at label 93.
+ */
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;	\
+	bl,pn			%xcc, 95f;		\
+	 fsrc1			%f0, %f1;
+
+#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
+	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	ba,a,pt			%xcc, 93f;
+
+	/* Tell the assembler %g2/%g3 are intentionally clobbered. */
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl		FUNC_NAME
+	.type		FUNC_NAME,#function
+FUNC_NAME:		/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (5 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
+	VISEntry
+
+	/* Is 'dst' already aligned on an 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, %GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, %GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+	/* Destination is 64-byte aligned.  */
+3:	
+	membar		  #LoadStore | #StoreStore | #StoreLoad
+
+	subcc		%o2, 0x40, %GLOBAL_SPARE
+	add		%o1, %g1, %g1
+	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
+	srl		%g1, 3, %g2
+	sub		%o2, %GLOBAL_SPARE, %g3
+	andn		%o1, (0x40 - 1), %o1
+	and		%g2, 7, %g2
+	andncc		%g3, 0x7, %g3
+	fmovd		%f0, %f2
+	sub		%g3, 0x8, %g3
+	sub		%o2, %GLOBAL_SPARE, %o2
+
+	add		%g1, %GLOBAL_SPARE, %g1
+	subcc		%o2, %g3, %o2
+
+	EX_LD(LOAD_BLK(%o1, %f0))
+	add		%o1, 0x40, %o1
+	add		%g1, %g3, %g1
+	EX_LD(LOAD_BLK(%o1, %f16))
+	add		%o1, 0x40, %o1
+	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+	EX_LD(LOAD_BLK(%o1, %f32))
+	add		%o1, 0x40, %o1
+
+	/* There are 8 instances of the unrolled loop,
+	 * one for each possible alignment of the
+	 * source buffer.  Each loop instance is 452
+	 * bytes.
+	 */
+	sll		%g2, 3, %o3
+	sub		%o3, %g2, %o3
+	sllx		%o3, 4, %o3
+	add		%o3, %g2, %o3
+	sllx		%o3, 2, %g2
+1:	rd		%pc, %o3
+	add		%o3, %lo(1f - 1b), %o3
+	jmpl		%o3 + %g2, %g0
+	 nop
+
+	.align		64
+1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f0, %f2, %f48
+1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_JUMP(o0, f48, 40f) membar #Sync
+2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_JUMP(o0, f48, 48f) membar #Sync
+3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_JUMP(o0, f48, 56f) membar #Sync
+
+1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f2, %f4, %f48
+1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_JUMP(o0, f48, 41f) membar #Sync
+2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_JUMP(o0, f48, 49f) membar #Sync
+3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_JUMP(o0, f48, 57f) membar #Sync
+
+1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f4, %f6, %f48
+1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_JUMP(o0, f48, 42f) membar #Sync
+2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_JUMP(o0, f48, 50f) membar #Sync
+3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_JUMP(o0, f48, 58f) membar #Sync
+
+1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) 
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f6, %f8, %f48
+1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_JUMP(o0, f48, 43f) membar #Sync
+2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_JUMP(o0, f48, 51f) membar #Sync
+3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_JUMP(o0, f48, 59f) membar #Sync
+
+1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f8, %f10, %f48
+1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_JUMP(o0, f48, 44f) membar #Sync
+2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_JUMP(o0, f48, 52f) membar #Sync
+3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_JUMP(o0, f48, 60f) membar #Sync
+
+1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f10, %f12, %f48
+1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_JUMP(o0, f48, 45f) membar #Sync
+2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_JUMP(o0, f48, 53f) membar #Sync
+3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_JUMP(o0, f48, 61f) membar #Sync
+
+1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f12, %f14, %f48
+1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_JUMP(o0, f48, 46f) membar #Sync
+2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_JUMP(o0, f48, 54f) membar #Sync
+3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_JUMP(o0, f48, 62f) membar #Sync
+
+1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f14, %f16, %f48
+1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_JUMP(o0, f48, 47f) membar #Sync
+2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_JUMP(o0, f48, 55f) membar #Sync
+3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_JUMP(o0, f48, 63f) membar #Sync
+
+40:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
+41:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
+42:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
+43:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
+44:	FINISH_VISCHUNK(o0, f8,  f10, g3)
+45:	FINISH_VISCHUNK(o0, f10, f12, g3)
+46:	FINISH_VISCHUNK(o0, f12, f14, g3)
+47:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
+48:	FINISH_VISCHUNK(o0, f16, f18, g3)
+49:	FINISH_VISCHUNK(o0, f18, f20, g3)
+50:	FINISH_VISCHUNK(o0, f20, f22, g3)
+51:	FINISH_VISCHUNK(o0, f22, f24, g3)
+52:	FINISH_VISCHUNK(o0, f24, f26, g3)
+53:	FINISH_VISCHUNK(o0, f26, f28, g3)
+54:	FINISH_VISCHUNK(o0, f28, f30, g3)
+55:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
+56:	FINISH_VISCHUNK(o0, f32, f34, g3)
+57:	FINISH_VISCHUNK(o0, f34, f36, g3)
+58:	FINISH_VISCHUNK(o0, f36, f38, g3)
+59:	FINISH_VISCHUNK(o0, f38, f40, g3)
+60:	FINISH_VISCHUNK(o0, f40, f42, g3)
+61:	FINISH_VISCHUNK(o0, f42, f44, g3)
+62:	FINISH_VISCHUNK(o0, f44, f46, g3)
+63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)
+
+93:	EX_LD(LOAD(ldd, %o1, %f2))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bl,pn		%xcc, 95f
+	 add		%o0, 8, %o0
+	EX_LD(LOAD(ldd, %o1, %f0))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bge,pt		%xcc, 93b
+	 add		%o0, 8, %o0
+
+95:	brz,pt		%o2, 2f
+	 mov		%g1, %o1
+
+1:	EX_LD(LOAD(ldub, %o1, %o3))
+	add		%o1, 1, %o1
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %o3, %o0))
+	bne,pt		%xcc, 1b
+	 add		%o0, 1, %o0
+
+2:	membar		#StoreLoad | #StoreStore
+	VISExit
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		64
+70:	/* 16 < len <= (5 * 64) */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:	andn		%o2, 0xf, %GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	sub		%o2, 0x8, %o2
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	sub		%o2, 0x4, %o2
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	EX_LD(LOAD(ldub, %o1, %o5))
+	subcc		%g1, 1, %g1
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, %GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:	EX_LD(LOAD(lduw, %o1, %g1))
+	subcc		%o2, 4, %o2
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:	EX_LD(LOAD(ldub, %o1, %g1))
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/U3copy_from_user.S b/arch/sparc64/lib/U3copy_from_user.S
new file mode 100644
index 0000000..df600b6
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_from_user.S
@@ -0,0 +1,22 @@
+/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+/* EX_LD(x): execute load instruction 'x' under a numbered label and
+ * register an exception-table entry for it.  If the load faults, the
+ * fixup stub returns 1 in %o0 to signal a failed copy.
+ */
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+/* Instantiate U3memcpy as U3copy_from_user: loads read user memory
+ * through %asi.  No PREAMBLE is defined here (U3memcpy's default
+ * empty one applies) -- presumably because this routine is only
+ * reached via the boot-time patch in U3patch.S; verify against the
+ * ___copy_from_user entry path.  Returns 0 (EX_RETVAL) on success.
+ */
+#define FUNC_NAME		U3copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define EX_RETVAL(x)		0
+
+#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3copy_to_user.S b/arch/sparc64/lib/U3copy_to_user.S
new file mode 100644
index 0000000..f337f22
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_to_user.S
@@ -0,0 +1,33 @@
+/* U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+/* EX_ST(x): execute store instruction 'x' under a numbered label and
+ * register an exception-table entry for it.  If the store faults, the
+ * fixup stub returns 1 in %o0 to signal a failed copy.
+ */
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+/* Instantiate U3memcpy as U3copy_to_user: single stores and 64-byte
+ * block stores both target user memory (ASI_AIUS / ASI_BLK_AIUS);
+ * loads remain normal kernel accesses.  Returns 0 on success.
+ */
+#define FUNC_NAME		U3copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3memcpy.S b/arch/sparc64/lib/U3memcpy.S
new file mode 100644
index 0000000..7cae9cc
--- /dev/null
+++ b/arch/sparc64/lib/U3memcpy.S
@@ -0,0 +1,422 @@
+/* U3memcpy.S: UltraSparc-III optimized memcpy.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+/* Configuration block.  Buildable outside the kernel for testing;
+ * then VISEntryHalf/VISExitHalf and the ASI constant are provided
+ * locally.  Unlike U1memcpy.S, GLOBAL_SPARE here includes the '%'
+ * and only the lower half of the FPU register file is used
+ * (VISEntryHalf), which keeps kernel FPU save/restore cheap.
+ */
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	%g7
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE	%g5
+#endif
+
+/* The EX_*, LOAD/STORE*, FUNC_NAME and PREAMBLE hooks default to
+ * plain memcpy behavior; U3copy_{from,to}_user.S override them before
+ * including this file to add fault handling and user-space ASIs.
+ * Note there is no LOAD_BLK hook: U3 only uses block stores.
+ */
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	U3memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	/* Tell the assembler %g2/%g3 are intentionally clobbered. */
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	/* Special/non-trivial issues of this code:
+	 *
+	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+	 * 2) Only low 32 FPU registers are used so that only the
+	 *    lower half of the FPU register set is dirtied by this
+	 *    code.  This is especially important in the kernel.
+	 * 3) This code never prefetches cachelines past the end
+	 *    of the source buffer.
+	 */
+
+	.text
+	.align		64
+
+	/* The cheetah's flexible spine, oversized liver, enlarged heart,
+	 * slender muscular body, and claws make it the swiftest hunter
+	 * in Africa and the fastest animal on land.  Can reach speeds
+	 * of up to 2.4GB per second.
+	 */
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (3 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
+	 * o5 from here until we hit VISExitHalf.
+	 */
+	VISEntryHalf
+
+	/* Is 'dst' already aligned on an 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f2
+	EX_ST(STORE(std, %f2, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+3:	LOAD(prefetch, %o1 + 0x000, #one_read)
+	LOAD(prefetch, %o1 + 0x040, #one_read)
+	andn		%o2, (0x40 - 1), GLOBAL_SPARE
+	LOAD(prefetch, %o1 + 0x080, #one_read)
+	LOAD(prefetch, %o1 + 0x0c0, #one_read)
+	LOAD(prefetch, %o1 + 0x100, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
+	LOAD(prefetch, %o1 + 0x140, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	LOAD(prefetch, %o1 + 0x180, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f6, %f8, %f22
+
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f10, %f12, %f26
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+
+	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
+	add		%o1, 0x40, %o1
+	bgu,pt		%XCC, 1f
+	 srl		GLOBAL_SPARE, 6, %o3
+	ba,pt		%xcc, 2f
+	 nop
+
+	.align		64
+1:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	add		%o0, 0x40, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	subcc		%o3, 0x01, %o3
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f10, %f12, %f26
+	bg,pt		%XCC, 1b
+	 add		%o1, 0x40, %o1
+
+	/* Finally we copy the last full 64-byte block. */
+2:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f8, %f10, %f24
+	cmp		%g1, 0
+	be,pt		%XCC, 1f
+	 add		%o0, 0x40, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+1:	faligndata	%f10, %f12, %f26
+	faligndata	%f12, %f14, %f28
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	add		%o0, 0x40, %o0
+	add		%o1, 0x40, %o1
+	membar		#Sync
+
+	/* Now we copy the (len modulo 64) bytes at the end.
+	 * Note how we borrow the %f0 loaded above.
+	 *
+	 * Also notice how this code is careful not to perform a
+	 * load past the end of the src buffer.
+	 */
+	and		%o2, 0x3f, %o2
+	andcc		%o2, 0x38, %g2
+	be,pn		%XCC, 2f
+	 subcc		%g2, 0x8, %g2
+	be,pn		%XCC, 2f
+	 cmp		%g1, 0
+
+	sub		%o2, %g2, %o2
+	be,a,pt		%XCC, 1f
+	 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
+
+1:	EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	be,pn		%XCC, 2f
+	 add		%o0, 0x8, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bne,pn		%XCC, 1b
+	 add		%o0, 0x8, %o0
+
+	/* If anything is left, we copy it one byte at a time.
+	 * Note that %g1 is (src & 0x3) saved above before the
+	 * alignaddr was performed.
+	 */
+2:
+	cmp		%o2, 0
+	add		%o1, %g1, %o1
+	VISExitHalf
+	be,pn		%XCC, 85f
+	 sub		%o0, %o1, %o3
+
+	andcc		%g1, 0x7, %g0
+	bne,pn		%icc, 90f
+	 andcc		%o2, 0x8, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+
+1:	andcc		%o2, 0x2, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o1, %o5))
+	EX_ST(STORE(sth, %o5, %o1 + %o3))
+	add		%o1, 0x2, %o1
+
+1:	andcc		%o2, 0x1, %g0
+	be,pt		%icc, 85f
+	 nop
+	EX_LD(LOAD(ldub, %o1, %o5))
+	ba,pt		%xcc, 85f
+	 EX_ST(STORE(stb, %o5, %o1 + %o3))
+
+	.align		64
+70: /* 16 < len <= 64 */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:
+	andn		%o2, 0xf, GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x8, %o2
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:
+	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	subcc		%g1, 1, %g1
+	EX_LD(LOAD(ldub, %o1, %o5))
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:
+	subcc		%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:
+	subcc		%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S
new file mode 100644
index 0000000..e2b6c5e
--- /dev/null
+++ b/arch/sparc64/lib/U3patch.S
@@ -0,0 +1,32 @@
+/* U3patch.S: Patch Ultra-I routines with Ultra-III variant.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#define BRANCH_ALWAYS	0x10680000	/* opcode template: "ba,pt %xcc, 0" (zero displacement) */
+#define NOP		0x01000000	/* opcode: "nop" (sethi 0, %g0) */
+#define ULTRA3_DO_PATCH(OLD, NEW)	/* redirect OLD to NEW: overwrite OLD's first two insns with "ba,pt %xcc, NEW; nop" */ \
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; /* %g1 = address of NEW */ \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; /* %g2 = address of OLD */ \
+	sub	%g1, %g2, %g1; /* byte displacement OLD -> NEW */ \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	srl	%g1, 2, %g1; /* convert to an instruction-word displacement */ \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; /* merge displacement into the branch opcode */ \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; /* nop fills the branch delay slot */ \
+	flush	%g2; /* flush I-cache line so the patched code is fetched */
+
+	.globl	cheetah_patch_copyops
+	.type	cheetah_patch_copyops,#function
+cheetah_patch_copyops:			! called once at boot on UltraSPARC-III ("cheetah") cpus
+	ULTRA3_DO_PATCH(memcpy, U3memcpy)
+	ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
+	ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
+	retl
+	 nop
+	.size	cheetah_patch_copyops,.-cheetah_patch_copyops
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
new file mode 100644
index 0000000..65e328d
--- /dev/null
+++ b/arch/sparc64/lib/VISsave.S
@@ -0,0 +1,131 @@
+/* $Id: VISsave.S,v 1.6 2002/02/09 19:49:30 davem Exp $
+ * VISsave.S: Code for saving FPU register state for
+ *            VIS routines. One should not call this directly,
+ *            but use macros provided in <asm/visasm.h>.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+
+	.text
+	.globl		VISenter, VISenterhalf
+
+	/* On entry: %o5=current FPRS value, %g7 is callers address */
+	/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
+
+	/* Nothing special need be done here to handle pre-emption, this
+	 * FPU save/restore mechanism is already preemption safe.
+	 */
+
+	.align		32
+VISenter:
+	ldub		[%g6 + TI_FPDEPTH], %g1	! %g6 = current thread_info; %g1 = FPU save depth
+	brnz,a,pn	%g1, 1f			! non-zero depth: nested save below
+	 cmp		%g1, 1			! annulled unless branch taken
+	stb		%g0, [%g6 + TI_FPSAVED]	! depth 0: mark no FP state saved
+	stx		%fsr, [%g6 + TI_XFSR]
+9:	jmpl		%g7 + %g0, %g0		! return to caller (%g7)
+	 nop
+1:	bne,pn		%icc, 2f		! depth != 1: save into per-depth slot
+
+	 srl		%g1, 1, %g1		! slot index = depth / 2
+vis1:	ldub		[%g6 + TI_FPSAVED], %g3	! depth 1: merge dirty FPRS bits into base record
+	stx		%fsr, [%g6 + TI_XFSR]
+	or		%g3, %o5, %g3
+	stb		%g3, [%g6 + TI_FPSAVED]
+	rd		%gsr, %g3
+	clr		%g1			! save offset 0 for the base slot
+	ba,pt		%xcc, 3f
+
+	 stx		%g3, [%g6 + TI_GSR]
+2:	add		%g6, %g1, %g3		! %g3 = thread_info + slot index
+	cmp		%o5, FPRS_DU
+	be,pn		%icc, 6f		! only the upper half is dirty
+	 sll		%g1, 3, %g1		! index -> 8-byte GSR/XFSR slot offset
+	stb		%o5, [%g3 + TI_FPSAVED]
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1		! further scale (net <<8) to the 256-byte FP reg slot
+3:	andcc		%o5, FPRS_DL|FPRS_DU, %g0
+	be,pn		%icc, 9b		! nothing dirty, just return
+	 add		%g6, TI_FPREGS, %g2
+	andcc		%o5, FPRS_DL, %g0
+	membar		#StoreStore | #LoadStore	! order prior accesses before the block stores
+
+	be,pn		%icc, 4f		! lower half clean, skip %f0-%f31
+	 add		%g6, TI_FPREGS+0x40, %g3
+	stda		%f0, [%g2 + %g1] ASI_BLK_P	! 64-byte block store %f0-%f14
+	stda		%f16, [%g3 + %g1] ASI_BLK_P	! 64-byte block store %f16-%f30
+	andcc		%o5, FPRS_DU, %g0
+	be,pn		%icc, 5f		! upper half clean, done
+4:	 add		%g1, 128, %g1		! advance to upper-half area
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+5:	membar		#Sync			! complete the block stores before returning
+	jmpl		%g7 + %g0, %g0
+	 nop
+
+6:	ldub		[%g3 + TI_FPSAVED], %o5	! FPRS_DU-only path: record upper-half save
+	or		%o5, FPRS_DU, %o5
+	add		%g6, TI_FPREGS+0x80, %g2
+	stb		%o5, [%g3 + TI_FPSAVED]
+
+	sll		%g1, 5, %g1
+	add		%g6, TI_FPREGS+0xc0, %g3
+	wr		%g0, FPRS_FEF, %fprs	! enable FPU before touching %f32+
+	membar		#StoreStore | #LoadStore
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+	jmpl		%g7 + %g0, %g0
+
+	 nop
+
+	.align		32
+VISenterhalf:					! like VISenter, but only %f0-%f31 are to be used
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	brnz,a,pn	%g1, 1f			! non-zero depth: nested save below
+	 cmp		%g1, 1			! annulled unless branch taken
+	stb		%g0, [%g6 + TI_FPSAVED]
+	stx		%fsr, [%g6 + TI_XFSR]
+	clr		%o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%g0, FPRS_FEF, %fprs
+
+1:	bne,pn		%icc, 2f
+	 srl		%g1, 1, %g1
+	ba,pt		%xcc, vis1		! depth 1: share full-save path
+	 sub		%g7, 8, %g7		! back return addr up 8 bytes — presumably so the
+						! shared vis1 tail resumes correctly; see visasm.h
+2:	addcc		%g6, %g1, %g3
+	sll		%g1, 3, %g1
+	andn		%o5, FPRS_DU, %g2	! only lower-half state is recorded as saved
+	stb		%g2, [%g3 + TI_FPSAVED]
+
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1
+3:	andcc		%o5, FPRS_DL, %g0
+	be,pn		%icc, 4f		! lower half clean, skip the save
+	 add		%g6, TI_FPREGS, %g2
+
+	membar		#StoreStore | #LoadStore
+	add		%g6, TI_FPREGS+0x40, %g3
+	stda		%f0, [%g2 + %g1] ASI_BLK_P
+	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+4:	and		%o5, FPRS_DU, %o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%o5, FPRS_FEF, %fprs	! keep FEF plus any still-live upper-half bit
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
new file mode 100644
index 0000000..e528b8d
--- /dev/null
+++ b/arch/sparc64/lib/atomic.S
@@ -0,0 +1,139 @@
+/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
+ * atomic.S: These things are too big to do inline.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define ATOMIC_PRE_BARRIER	nop
+#define ATOMIC_POST_BARRIER	nop
+#endif
+
+	.text
+
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1	! load current counter
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7	! if [%o1]==%g1 store %g7; old value -> %g7
+	cmp	%g1, %g7	! someone else got in first?
+	bne,pn	%icc, 1b	! yes, retry
+	 nop
+	retl
+	 nop
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1	! same CAS loop as atomic_add, with sub
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_sub, .-atomic_sub
+
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 add	%g7, %o0, %g7	! delay slot: recompute new value (cas left old in %g7)
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0	! sign-extend 32-bit result for the caller
+	.size	atomic_add_ret, .-atomic_add_ret
+
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 sub	%g7, %o0, %g7	! delay slot: recompute new value for return
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1	! 64-bit variants use ldx/casx and %xcc
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_add, .-atomic64_add
+
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_sub, .-atomic64_sub
+
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 add	%g7, %o0, %g7	! delay slot: recompute new value for return
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0	! full 64-bit result, no sign-extension needed
+	.size	atomic64_add_ret, .-atomic64_add_ret
+
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 sub	%g7, %o0, %g7	! delay slot: recompute new value for return
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
new file mode 100644
index 0000000..886dcd2
--- /dev/null
+++ b/arch/sparc64/lib/bitops.S
@@ -0,0 +1,145 @@
+/* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $
+ * bitops.S: Sparc64 atomic bit operations.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define BITOP_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define BITOP_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define BITOP_PRE_BARRIER	nop
+#define BITOP_POST_BARRIER	nop
+#endif
+
+	.text
+
+	.globl	test_and_set_bit
+	.type	test_and_set_bit,#function
+test_and_set_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1	! index of the 64-bit word holding bit nr
+	mov	1, %o2
+	sllx	%g1, 3, %g3	! byte offset of that word
+	and	%o0, 63, %g2	! bit position within the word
+	sllx	%o2, %g2, %o2	! %o2 = single-bit mask
+	add	%o1, %g3, %o1	! %o1 = address of the word
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1	! new value with the bit set
+	casx	[%o1], %g7, %g1	! if [%o1]==%g7 store %g1; old value -> %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b	! raced with another cpu, retry
+	 and	%g7, %o2, %g2	! delay slot: extract old bit value
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0	! return 1 iff the bit was already set
+	.size	test_and_set_bit, .-test_and_set_bit
+
+	.globl	test_and_clear_bit
+	.type	test_and_clear_bit,#function
+test_and_clear_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1	! same structure as test_and_set_bit, using andn
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1	! new value with the bit cleared
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %o2, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0	! return 1 iff the bit was previously set
+	.size	test_and_clear_bit, .-test_and_clear_bit
+
+	.globl	test_and_change_bit
+	.type	test_and_change_bit,#function
+test_and_change_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1	! same structure, using xor to toggle
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1	! new value with the bit flipped
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %o2, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_change_bit, .-test_and_change_bit
+
+	.globl	set_bit
+	.type	set_bit,#function
+set_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1	! non-value-returning variants: no barriers
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	set_bit, .-set_bit
+
+	.globl	clear_bit
+	.type	clear_bit,#function
+clear_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	clear_bit, .-clear_bit
+
+	.globl	change_bit
+	.type	change_bit,#function
+change_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	change_bit, .-change_bit
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S
new file mode 100644
index 0000000..21a933f
--- /dev/null
+++ b/arch/sparc64/lib/bzero.S
@@ -0,0 +1,158 @@
+/* bzero.S: Simple prefetching memset, bzero, and clear_user
+ *          implementations.
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+	.text
+
+	.globl	__memset
+	.type	__memset, #function
+__memset:		/* %o0=buf, %o1=pat, %o2=len */
+
+	.globl	memset
+	.type	memset, #function
+memset:			/* %o0=buf, %o1=pat, %o2=len */
+	and		%o1, 0xff, %o3	! isolate the fill byte
+	mov		%o2, %o1	! %o1 = length from here on (shared with __bzero)
+	sllx		%o3, 8, %g1	! replicate the byte ...
+	or		%g1, %o3, %o2	!   ... to 16 bits
+	sllx		%o2, 16, %g1
+	or		%g1, %o2, %o2	!   ... to 32 bits
+	sllx		%o2, 32, %g1
+	ba,pt		%xcc, 1f	! join __bzero body with %o2 = 64-bit pattern
+	 or		%g1, %o2, %o2
+
+	.globl	__bzero
+	.type	__bzero, #function
+__bzero:		/* %o0=buf, %o1=len */
+	clr		%o2		! zero fill pattern
+1:	mov		%o0, %o3	! remember buf, returned at the end (memset semantics)
+	brz,pn		%o1, __bzero_done
+	 cmp		%o1, 16
+	bl,pn		%icc, __bzero_tiny	! < 16 bytes: plain byte loop
+	 prefetch	[%o0 + 0x000], #n_writes
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 stb		%o2, [%o0 + 0x00]	! byte-at-a-time to 4-byte alignment
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 stw		%o2, [%o0 + 0x00]	! word store to reach 8-byte alignment
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1	! %g1 = leftover 8-byte chunks after 64-byte blocks
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4	! %o4 = bytes handled by the 64-byte loop
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1	! %o1 = trailing byte count
+	prefetch	[%o0 + 0x040], #n_writes
+	prefetch	[%o0 + 0x080], #n_writes
+	prefetch	[%o0 + 0x0c0], #n_writes
+	prefetch	[%o0 + 0x100], #n_writes
+	prefetch	[%o0 + 0x140], #n_writes
+4:	prefetch	[%o0 + 0x180], #n_writes	! stay ~6 cachelines ahead
+	stx		%o2, [%o0 + 0x00]
+	stx		%o2, [%o0 + 0x08]
+	stx		%o2, [%o0 + 0x10]
+	stx		%o2, [%o0 + 0x18]
+	stx		%o2, [%o0 + 0x20]
+	stx		%o2, [%o0 + 0x28]
+	stx		%o2, [%o0 + 0x30]
+	stx		%o2, [%o0 + 0x38]
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	stx		%o2, [%o0 + 0x00]	! remaining 8-byte chunks
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __bzero_done
+	 nop
+__bzero_tiny:
+1:	stb		%o2, [%o0 + 0x00]	! trailing bytes
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__bzero_done:
+	retl
+	 mov		%o3, %o0	! return the original buffer pointer
+	.size		__bzero, .-__bzero
+	.size		__memset, .-__memset
+	.size		memset, .-memset
+
+/* Wrap a user-space store in an exception table entry; on fault the
+ * fixup returns %o1, the length still outstanding at that point
+ * (NOTE(review): an approximation of the uncleared byte count). */
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	%o1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.globl	__bzero_noasi
+	.type	__bzero_noasi, #function
+__bzero_noasi:		/* %o0=buf, %o1=len */
+	brz,pn		%o1, __bzero_noasi_done	! same algorithm as __bzero, but every
+	 cmp		%o1, 16			! store goes through %asi with fault fixups
+	bl,pn		%icc, __bzero_noasi_tiny
+	 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 EX_ST(stwa	%g0, [%o0 + 0x00] %asi)
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1
+	EX_ST(prefetcha	[%o0 + 0x040] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x080] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x0c0] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x100] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x140] %asi, #n_writes)
+4:	EX_ST(prefetcha	[%o0 + 0x180] %asi, #n_writes)
+	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x08] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x10] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x18] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x20] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x28] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x30] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x38] %asi)
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __bzero_noasi_done
+	 nop
+__bzero_noasi_tiny:
+1:	EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__bzero_noasi_done:
+	retl
+	 clr		%o0		! success: zero bytes left unclear
+	.size		__bzero_noasi, .-__bzero_noasi
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
new file mode 100644
index 0000000..ba9cd3c
--- /dev/null
+++ b/arch/sparc64/lib/checksum.S
@@ -0,0 +1,172 @@
+/* checksum.S: Sparc V9 optimized checksum code.
+ *
+ *  Copyright(C) 1995 Linus Torvalds
+ *  Copyright(C) 1995 Miguel de Icaza
+ *  Copyright(C) 1996, 2000 David S. Miller
+ *  Copyright(C) 1997 Jakub Jelinek
+ *
+ * derived from:
+ *	Linux/Alpha checksum c-code
+ *      Linux/ix86 inline checksum assembly
+ *      RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
+ *	David Mosberger-Tang for optimized reference c-code
+ *	BSD4.4 portable checksum routine
+ */
+
+	.text
+
+csum_partial_fix_alignment:	! entered with %icc set from "andcc %o0, 0x1"
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f	! address even, go check 2-byte alignment
+	 nop
+	ldub		[%o0 + 0x00], %o4	! consume the odd leading byte
+	add		%o0, 1, %o0
+	sub		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, csum_partial_post_align
+	 cmp		%o1, 2
+	blu,pn		%icc, csum_partial_end_cruft	! fewer than 2 bytes left
+	 nop
+	lduh		[%o0 + 0x00], %o5	! consume a halfword to reach 4-byte alignment
+	add		%o0, 2, %o0
+	sub		%o1, 2, %o1
+	ba,pt		%xcc, csum_partial_post_align
+	 add		%o5, %o4, %o4
+
+	.align		32
+	.globl		csum_partial
+csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
+	prefetch	[%o0 + 0x000], #n_reads
+	clr		%o4		! %o4 accumulates the checksum
+	prefetch	[%o0 + 0x040], #n_reads
+	brz,pn		%o1, csum_partial_finish
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" whether the lowest bit in the address
+	 * was set in %g7.  Because if it is, we have to swap
+	 * upper and lower 8 bit fields of the sum we calculate.
+	*/
+	bne,pn		%icc, csum_partial_fix_alignment
+	 andcc		%o0, 0x1, %g7
+
+csum_partial_post_align:
+	prefetch	[%o0 + 0x080], #n_reads
+	andncc		%o1, 0x3f, %o3	! %o3 = bytes covered by the 64-byte loop
+
+	prefetch	[%o0 + 0x0c0], #n_reads
+	sub		%o1, %o3, %o1	! %o1 = remainder after the big loop
+	brz,pn		%o3, 2f
+	 prefetch	[%o0 + 0x100], #n_reads
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions we accumulate 32-bit
+	 * values into a 64-bit register.  At the end of the
+	 * loop we fold it down to 32-bits and so on.
+	 */
+	prefetch	[%o0 + 0x140], #n_reads
+1:	lduw		[%o0 + 0x00], %o5	! 16 words per iteration, loads and adds
+	lduw		[%o0 + 0x04], %g1	! interleaved to hide load latency
+	lduw		[%o0 + 0x08], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x0c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x10], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x14], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x18], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x1c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x20], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x24], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x28], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x2c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x30], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x34], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x38], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x3c], %g3
+	add		%o4, %g1, %o4
+	prefetch	[%o0 + 0x180], #n_reads
+	add		%o4, %g2, %o4
+	subcc		%o3, 0x40, %o3
+	add		%o0, 0x40, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %g3, %o4
+
+2:	and		%o1, 0x3c, %o3	! whole 4-byte words left over
+	brz,pn		%o3, 2f
+	 sub		%o1, %o3, %o1	! %o1 = final 0-3 byte remainder
+1:	lduw		[%o0 + 0x00], %o5
+	subcc		%o3, 0x4, %o3
+	add		%o0, 0x4, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %o5, %o4
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4	! fold twice: the second pass absorbs the
+	srlx		%o4, 32, %o5	! carry the first fold may generate
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+csum_partial_end_cruft:
+	/* %o4 has the 16-bit sum we have calculated so-far.  */
+	cmp		%o1, 2
+	blu,pt		%icc, 1f	! fewer than 2 bytes remain
+	 nop
+	lduh		[%o0 + 0x00], %o5
+	sub		%o1, 2, %o1
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+1:	brz,pt		%o1, 1f
+	 nop
+	ldub		[%o0 + 0x00], %o5	! final odd byte goes in the high half
+	sub		%o1, 1, %o1
+	add		%o0, 1, %o0
+	sllx		%o5, 8, %o5
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		%g7, 1f		! %g7 != 0: buffer started on an odd address
+	 nop
+
+	/* We started with an odd byte, byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	add		%o2, %o4, %o2	! combine with the caller-supplied partial sum
+
+csum_partial_finish:
+	retl
+	 mov		%o2, %o0
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
new file mode 100644
index 0000000..b59884e
--- /dev/null
+++ b/arch/sparc64/lib/clear_page.S
@@ -0,0 +1,105 @@
+/* clear_page.S: UltraSparc optimized clear page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, clear the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the clear.
+	 */
+
+#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
+#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
+
+	.text
+
+	.globl		_clear_page
+_clear_page:		/* %o0=dest */
+	ba,pt		%xcc, clear_page_common
+	 clr		%o4		! %o4=0: no temp TLB entry, no demap needed on exit
+
+	/* This thing is pretty important, it shows up
+	 * on the profiles via do_anonymous_page().
+	 */
+	.align		32
+	.globl		clear_user_page
+clear_user_page:	/* %o0=dest, %o1=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o2	! %o2 = saved preempt count
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o4
+
+	sllx		%g2, 32, %g2
+	sethi		%uhi(TTE_BITS_TOP), %g3
+
+	sllx		%g3, 32, %g3
+	sub		%o0, %g2, %g1		! paddr
+
+	or		%g3, TTE_BITS_BOTTOM, %g3
+	and		%o1, %o4, %o0		! vaddr D-cache alias bit
+
+	or		%g1, %g3, %g1		! TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	add		%o2, 1, %o4
+	add		%o0, %o3, %o0		! TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o4, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entry.  */
+	rdpr		%pstate, %o4
+	wrpr		%o4, PSTATE_IE, %pstate	! interrupts off while loading the entry
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	flush		%g6
+	wrpr		%o4, 0x0, %pstate	! interrupts back on
+
+	mov		1, %o4			! %o4=1: demap + preempt restore on exit
+
+clear_page_common:
+	VISEntryHalf
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	fzero		%f0
+	sethi		%hi(PAGE_SIZE/64), %o1	! %o1 = number of 64-byte blocks
+	mov		%o0, %g1		! remember vaddr for tlbflush
+	fzero		%f2
+	or		%o1, %lo(PAGE_SIZE/64), %o1
+	faddd		%f0, %f2, %f4		! 0+0 and 0*0: fill %f4-%f14 with zeros
+	fmuld		%f0, %f2, %f6
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+1:	stda		%f0, [%o0 + %g0] ASI_BLK_P	! 64-byte block store of zeros
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 0x40, %o0
+	membar		#Sync
+	VISExitHalf
+
+	brz,pn		%o4, out		! _clear_page: nothing to tear down
+	 nop
+
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP	! evict the temporary mapping
+	membar		#Sync
+	stw		%o2, [%g6 + TI_PRE_COUNT]	! re-enable preemption
+
+out:	retl
+	 nop
+
diff --git a/arch/sparc64/lib/copy_in_user.S b/arch/sparc64/lib/copy_in_user.S
new file mode 100644
index 0000000..816076c
--- /dev/null
+++ b/arch/sparc64/lib/copy_in_user.S
@@ -0,0 +1,119 @@
+/* copy_in_user.S: Copy from userspace to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#include <asm/asi.h>
+
+#define XCC xcc
+
+/* Wrap a user access in an exception table entry; on fault the
+ * fixup returns 1 (non-zero residual => caller sees failure). */
+#define EX(x,y)			\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov 1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align	32
+
+	/* Don't try to get too fancy here, just nice and
+	 * simple.  This is predominantly used for well aligned
+	 * small copies in the compat layer.  It is also used
+	 * to copy register windows around during thread cloning.
+	 */
+
+	.globl		___copy_in_user
+	.type		___copy_in_user,#function
+___copy_in_user:	/* %o0=dst, %o1=src, %o2=len */
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+	rd		%asi, %g1
+	cmp		%g1, ASI_AIUS		! still the user-space ASI?
+	bne,pn		%icc, memcpy_user_stub	! no: KERNEL_DS, plain memcpy will do
+	 nop
+
+	cmp		%o2, 0
+	be,pn		%XCC, 85f		! zero length: nothing to do
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	bleu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3		! %o3 = dst|src|len for alignment checks
+
+	/* 16 < len <= 64 */
+	andcc		%o3, 0x7, %g0
+	bne,pn		%XCC, 90f		! not all 8-byte aligned: byte loop
+	 sub		%o0, %o1, %o3		! %o3 = dst - src; dst addr = src + %o3
+
+	andn		%o2, 0x7, %o4		! %o4 = 8-byte chunk count (bytes)
+	and		%o2, 0x7, %o2		! %o2 = trailing bytes
+1:	subcc		%o4, 0x8, %o4
+	EX(ldxa [%o1] %asi, %o5)
+	EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX(lduwa [%o1] %asi, %o5)
+	EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f		! 1-3 stray bytes: byte loop
+	 nop
+
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f		! not all 4-byte aligned: byte loop
+	 sub		%o0, %o1, %o3
+
+82:
+	subcc		%o2, 4, %o2		! len here is a multiple of 4 (<= 16)
+	EX(lduwa [%o1] %asi, %g1)
+	EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 82b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 clr		%o0		! success: return 0
+
+	.align	32
+90:
+	subcc		%o2, 1, %o2	! generic byte-at-a-time fallback
+	EX(lduba [%o1] %asi, %g1)
+	EX(stba %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 clr		%o0
+
+	.size		___copy_in_user, .-___copy_in_user
+
+	/* Act like copy_{to,in}_user(), ie. return zero instead
+	 * of original destination pointer.  This is invoked when
+	 * copy_{to,in}_user() finds that %asi is kernel space.
+	 */
+	.globl		memcpy_user_stub
+	.type		memcpy_user_stub,#function
+memcpy_user_stub:
+	save		%sp, -192, %sp	! new register window so %o0-%o2 survive as %i0-%i2
+	mov		%i0, %o0
+	mov		%i1, %o1
+	call		memcpy
+	 mov		%i2, %o2
+	ret
+	 restore	%g0, %g0, %o0	! return 0 in the caller's %o0
+	.size		memcpy_user_stub, .-memcpy_user_stub
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
new file mode 100644
index 0000000..23ebf2c
--- /dev/null
+++ b/arch/sparc64/lib/copy_page.S
@@ -0,0 +1,242 @@
+/* copy_page.S: UltraSparc optimized copy page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, clear the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the copy.
+	 */
+
+#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
+#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
+#define	DCACHE_SIZE	(PAGE_SIZE * 2)
+
+#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
+#define PAGE_SIZE_REM	0x80
+#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+#define PAGE_SIZE_REM	0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7)	/* stage 8 fp doubles into %f48-%f62 for the next block store */ \
+	fmovd	%reg0, %f48; 	fmovd	%reg1, %f50;		\
+	fmovd	%reg2, %f52; 	fmovd	%reg3, %f54;		\
+	fmovd	%reg4, %f56; 	fmovd	%reg5, %f58;		\
+	fmovd	%reg6, %f60; 	fmovd	%reg7, %f62;
+
+	.text
+
+	.align		32
+	.globl		copy_user_page
+	.type		copy_user_page,#function
+copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o4	! %o4 = saved preempt count
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o3
+
+	sllx		%g2, 32, %g2
+	sethi		%uhi(TTE_BITS_TOP), %g3
+
+	sllx		%g3, 32, %g3
+	sub		%o0, %g2, %g1		! dest paddr
+
+	sub		%o1, %g2, %g2		! src paddr
+	or		%g3, TTE_BITS_BOTTOM, %g3
+
+	and		%o2, %o3, %o0		! vaddr D-cache alias bit
+	or		%g1, %g3, %g1		! dest TTE data
+
+	or		%g2, %g3, %g2		! src TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	sethi		%hi(DCACHE_SIZE), %o1
+	add		%o0, %o3, %o0		! dest TTE vaddr
+
+	add		%o4, 1, %o2
+	add		%o0, %o1, %o1		! src TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o2, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entries.  */
+	rdpr		%pstate, %o2
+	wrpr		%o2, PSTATE_IE, %pstate	! interrupts off while loading entries
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	stxa		%o1, [%g3] ASI_DMMU
+	stxa		%g2, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	wrpr		%o2, 0x0, %pstate	! interrupts back on
+
+	BRANCH_IF_ANY_CHEETAH(g3,o2,1f)		! UltraSPARC-III takes the path at 1:
+	ba,pt		%xcc, 9f		! older cpus take the path at 9:
+	 nop
+
+1:
+	VISEntryHalf				! cheetah: software-pipelined ldd/stda copy
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	sethi		%hi((PAGE_SIZE/64)-2), %o2	! loop count, minus prologue+epilogue blocks
+	mov		%o0, %g1		! remember dest vaddr for tlbflush
+	prefetch	[%o1 + 0x000], #one_read
+	or		%o2, %lo((PAGE_SIZE/64)-2), %o2
+	prefetch	[%o1 + 0x040], #one_read
+	prefetch	[%o1 + 0x080], #one_read
+	prefetch	[%o1 + 0x0c0], #one_read
+	ldd		[%o1 + 0x000], %f0	! prologue: load first block into %f0-%f14,
+	prefetch	[%o1 + 0x100], #one_read	! copy to %f16-%f30 as it arrives
+	ldd		[%o1 + 0x008], %f2
+	prefetch	[%o1 + 0x140], #one_read
+	ldd		[%o1 + 0x010], %f4
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x018], %f6
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x020], %f8
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x028], %f10
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x030], %f12
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x038], %f14
+	fmovd		%f10, %f26
+	ldd		[%o1 + 0x040], %f0
+1:	ldd		[%o1 + 0x048], %f2	! steady state: store %f16-%f30 while
+	fmovd		%f12, %f28		! loading the next block into %f0-%f14
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x080], %f0
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f10, %f26
+	subcc		%o2, 1, %o2
+	add		%o0, 0x40, %o0
+	bne,pt		%xcc, 1b
+	 add		%o1, 0x40, %o1
+
+	ldd		[%o1 + 0x048], %f2	! epilogue: drain the pipeline, no more loads
+	fmovd		%f12, %f28
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	add		%o0, 0x40, %o0
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	fmovd		%f10, %f26
+	fmovd		%f12, %f28
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	membar		#Sync
+	VISExitHalf
+	ba,pt		%xcc, 5f
+	 nop
+
+9:
+	VISEntry				! pre-cheetah: full-VIS block load/store copy
+	ldub		[%g6 + TI_FAULT_CODE], %g3
+	mov		%o0, %g1
+	cmp		%g3, 0
+	rd		%asi, %g3
+	be,a,pt		%icc, 1f		! no pending D-cache flush requirement:
+	 wr		%g0, ASI_BLK_P, %asi	!   normal block stores
+	wr		%g0, ASI_BLK_COMMIT_P, %asi	! else: block-commit stores
+1:	ldda		[%o1] ASI_BLK_P, %f0	! prime two blocks ahead
+	add		%o1, 0x40, %o1
+	ldda		[%o1] ASI_BLK_P, %f16
+	add		%o1, 0x40, %o1
+	sethi		%hi(PAGE_SIZE), %o2
+1:	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)	! rotate %f0/%f16/%f32 blocks
+	ldda		[%o1] ASI_BLK_P, %f32		! through the %f48 staging area
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+	ldda		[%o1] ASI_BLK_P, %f16
+	stda		%f48, [%o0] %asi
+	sub		%o2, 0x40, %o2
+	add		%o1, 0x40, %o1
+	cmp		%o2, PAGE_SIZE_REM	! stop when only the primed blocks remain
+	bne,pt		%xcc, 1b
+	 add		%o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)	! 64K/4M pages: one extra rotation
+	ldda		[%o1] ASI_BLK_P, %f32
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	membar		#Sync
+	stda		%f32, [%o0] %asi	! flush the last two primed blocks
+	add		%o0, 0x40, %o0
+	stda		%f0, [%o0] %asi
+#else
+	membar		#Sync
+	stda		%f0, [%o0] %asi
+	add		%o0, 0x40, %o0
+	stda		%f16, [%o0] %asi
+#endif
+	membar		#Sync
+	wr		%g3, 0x0, %asi		! restore caller's %asi
+	VISExit
+
+5:
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP	! evict dest temp mapping
+	membar		#Sync
+
+	sethi		%hi(DCACHE_SIZE), %g2
+	stxa		%g0, [%g1 + %g2] ASI_DMMU_DEMAP	! evict src temp mapping
+	membar		#Sync
+
+	retl
+	 stw		%o4, [%g6 + TI_PRE_COUNT]	! re-enable preemption
+
+	.size		copy_user_page, .-copy_user_page
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
new file mode 100644
index 0000000..71af488
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy.S
@@ -0,0 +1,308 @@
+/* csum_copy.S: Checksum+copy code for sparc64
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+#ifdef __KERNEL__
+#define GLOBAL_SPARE	%g7
+#else
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	csum_partial_copy_nocheck
+#endif
+
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
+	.text
+
+90:
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o4))
+	add		%o0, 1, %o0
+	sub		%o2, 1, %o2
+	EX_ST(STORE(stb, %o4, %o1 + 0x00))
+	add		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, 80f
+	 cmp		%o2, 2
+	blu,pn		%icc, 60f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	add		%o0, 2, %o0
+	sub		%o2, 2, %o2
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 2, %o1
+	ba,pt		%xcc, 80f
+	 add		%o5, %o4, %o4
+
+	.globl		FUNC_NAME
+FUNC_NAME:		/* %o0=src, %o1=dst, %o2=len, %o3=sum */
+	LOAD(prefetch, %o0 + 0x000, #n_reads)
+	xor		%o0, %o1, %g1
+	clr		%o4
+	andcc		%g1, 0x3, %g0
+	bne,pn		%icc, 95f
+	 LOAD(prefetch, %o0 + 0x040, #n_reads)
+	
+	brz,pn		%o2, 70f
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" whether the lowest bit in the address
+	 * was set in GLOBAL_SPARE.  Because if it is, we have to swap
+	 * upper and lower 8 bit fields of the sum we calculate.
+	 */
+	bne,pn		%icc, 90b
+	 andcc		%o0, 0x1, GLOBAL_SPARE
+
+80:
+	LOAD(prefetch, %o0 + 0x080, #n_reads)
+	andncc		%o2, 0x3f, %g3
+
+	LOAD(prefetch, %o0 + 0x0c0, #n_reads)
+	sub		%o2, %g3, %o2
+	brz,pn		%g3, 2f
+	 LOAD(prefetch, %o0 + 0x100, #n_reads)
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions we accumulate 32-bit
+	 * values into a 64-bit register.  At the end of the
+	 * loop we fold it down to 32-bits and so on.
+	 */
+	ba,pt		%xcc, 1f
+	LOAD(prefetch, %o0 + 0x140, #n_reads)
+
+	.align		32
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	EX_LD(LOAD(lduw, %o0 + 0x04, %g1))
+	EX_LD(LOAD(lduw, %o0 + 0x08, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	EX_LD(LOAD(lduw, %o0 + 0x0c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x04))
+	EX_LD(LOAD(lduw, %o0 + 0x10, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x08))
+	EX_LD(LOAD(lduw, %o0 + 0x14, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x0c))
+	EX_LD(LOAD(lduw, %o0 + 0x18, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x10))
+	EX_LD(LOAD(lduw, %o0 + 0x1c, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x14))
+	EX_LD(LOAD(lduw, %o0 + 0x20, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x18))
+	EX_LD(LOAD(lduw, %o0 + 0x24, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x1c))
+	EX_LD(LOAD(lduw, %o0 + 0x28, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x20))
+	EX_LD(LOAD(lduw, %o0 + 0x2c, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x24))
+	EX_LD(LOAD(lduw, %o0 + 0x30, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x28))
+	EX_LD(LOAD(lduw, %o0 + 0x34, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x2c))
+	EX_LD(LOAD(lduw, %o0 + 0x38, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x30))
+	EX_LD(LOAD(lduw, %o0 + 0x3c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x34))
+	LOAD(prefetch, %o0 + 0x180, #n_reads)
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x38))
+	subcc		%g3, 0x40, %g3
+	add		%o0, 0x40, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x3c))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x40, %o1
+
+2:	and		%o2, 0x3c, %g3
+	brz,pn		%g3, 2f
+	 sub		%o2, %g3, %o2
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	subcc		%g3, 0x4, %g3
+	add		%o0, 0x4, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x4, %o1
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+60:
+	/* %o4 has the 16-bit sum we have calculated so-far.  */
+	cmp		%o2, 2
+	blu,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	sub		%o2, 2, %o2
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 0x2, %o1
+1:	brz,pt		%o2, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o5))
+	sub		%o2, 1, %o2
+	add		%o0, 1, %o0
+	EX_ST(STORE(stb, %o5, %o1 + 0x00))
+	sllx		%o5, 8, %o5
+	add		%o1, 1, %o1
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		GLOBAL_SPARE, 1f
+	 nop
+
+	/* We started with an odd byte, byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	add		%o3, %o4, %o3
+
+70:
+	retl
+	 mov		%o3, %o0
+
+95:	mov		0, GLOBAL_SPARE
+	brlez,pn	%o2, 4f
+	 andcc		%o0, 1, %o5		
+	be,a,pt		%icc, 1f
+	 srl		%o2, 1, %g1		
+	sub		%o2, 1, %o2	
+	EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE))
+	add		%o0, 1, %o0	
+	EX_ST(STORE(stb, GLOBAL_SPARE, %o1))
+	srl		%o2, 1, %g1
+	add		%o1, 1, %o1
+1:	brz,a,pn	%g1, 3f
+	 andcc		%o2, 1, %g0
+	andcc		%o0, 2, %g0	
+	be,a,pt		%icc, 1f
+	 srl		%g1, 1, %g1
+	EX_LD(LOAD(lduh, %o0, %o4))
+	sub		%o2, 2, %o2	
+	srl		%o4, 8, %g2
+	sub		%g1, 1, %g1	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o0, 2, %o0	
+	srl		%g1, 1, %g1
+	add		%o1, 2, %o1
+1:	brz,a,pn	%g1, 2f		
+	 andcc		%o2, 2, %g0
+	EX_LD(LOAD(lduw, %o0, %o4))
+5:	srl		%o4, 24, %g2
+	srl		%o4, 16, %g3
+	EX_ST(STORE(stb, %g2, %o1))
+	srl		%o4, 8, %g2
+	EX_ST(STORE(stb, %g3, %o1 + 1))
+	add		%o0, 4, %o0
+	EX_ST(STORE(stb, %g2, %o1 + 2))
+	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 3))
+	addc		GLOBAL_SPARE, %g0, GLOBAL_SPARE
+	add		%o1, 4, %o1
+	subcc		%g1, 1, %g1
+	bne,a,pt	%icc, 5b
+	 EX_LD(LOAD(lduw, %o0, %o4))
+	sll		GLOBAL_SPARE, 16, %g2
+	srl		GLOBAL_SPARE, 16, GLOBAL_SPARE
+	srl		%g2, 16, %g2
+	andcc		%o2, 2, %g0
+	add		%g2, GLOBAL_SPARE, GLOBAL_SPARE 
+2:	be,a,pt		%icc, 3f		
+	 andcc		%o2, 1, %g0
+	EX_LD(LOAD(lduh, %o0, %o4))
+	andcc		%o2, 1, %g0
+	srl		%o4, 8, %g2
+	add		%o0, 2, %o0	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o1, 2, %o1
+3:	be,a,pt		%icc, 1f		
+	 sll		GLOBAL_SPARE, 16, %o4
+	EX_LD(LOAD(ldub, %o0, %g2))
+	sll		%g2, 8, %o4	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	sll		GLOBAL_SPARE, 16, %o4
+1:	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	srl		GLOBAL_SPARE, 16, %o4
+	addc		%g0, %o4, GLOBAL_SPARE
+	brz,pt		%o5, 4f
+	 srl		GLOBAL_SPARE, 8, %o4
+	and		GLOBAL_SPARE, 0xff, %g2
+	and		%o4, 0xff, %o4
+	sll		%g2, 8, %g2
+	or		%g2, %o4, GLOBAL_SPARE
+4:	addcc		%o3, GLOBAL_SPARE, %o3
+	addc		%g0, %o3, %o0
+	retl
+	 srl		%o0, 0, %o0
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/csum_copy_from_user.S b/arch/sparc64/lib/csum_copy_from_user.S
new file mode 100644
index 0000000..817ebda
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_from_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_from_user.S: Checksum+copy from userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+
+#include "csum_copy.S"
diff --git a/arch/sparc64/lib/csum_copy_to_user.S b/arch/sparc64/lib/csum_copy_to_user.S
new file mode 100644
index 0000000..c2f9463
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_to_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_to_user.S: Checksum+copy to userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] %asi
+
+#include "csum_copy.S"
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
new file mode 100644
index 0000000..c421e0c
--- /dev/null
+++ b/arch/sparc64/lib/debuglocks.c
@@ -0,0 +1,376 @@
+/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
+ * debuglocks.c: Debugging versions of SMP locking primitives.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+
+#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
+
+static inline void show (char *str, spinlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
+	       str, lock, cpu, (unsigned int) caller,
+	       lock->owner_pc, lock->owner_cpu);
+}
+
+static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
+	       str, lock, cpu, (unsigned int) caller,
+	       lock->writer_pc, lock->writer_cpu);
+}
+
+static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+	int i;
+
+	printk("%s(%p) CPU#%d stuck at %08x\n",
+	       str, lock, cpu, (unsigned int) caller);
+	printk("Writer: PC(%08x):CPU(%x)\n",
+	       lock->writer_pc, lock->writer_cpu);
+	printk("Readers:");
+	for (i = 0; i < NR_CPUS; i++)
+		if (lock->reader_pc[i])
+			printk(" %d[%08x]", i, lock->reader_pc[i]);
+	printk("\n");
+}
+
+#undef INIT_STUCK
+#define INIT_STUCK 100000000
+
+void _do_spin_lock(spinlock_t *lock, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+again:
+	__asm__ __volatile__("ldstub [%1], %0"
+			     : "=r" (val)
+			     : "r" (&(lock->lock))
+			     : "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (val) {
+		while (lock->lock) {
+			if (!--stuck) {
+				if (shown++ <= 2)
+					show(str, lock, caller);
+				stuck = INIT_STUCK;
+			}
+			membar("#LoadLoad");
+		}
+		goto again;
+	}
+	lock->owner_pc = ((unsigned int)caller);
+	lock->owner_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+int _do_spin_trylock(spinlock_t *lock)
+{
+	unsigned long val, caller;
+	int cpu = get_cpu();
+
+	GET_CALLER(caller);
+	__asm__ __volatile__("ldstub [%1], %0"
+			     : "=r" (val)
+			     : "r" (&(lock->lock))
+			     : "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (!val) {
+		lock->owner_pc = ((unsigned int)caller);
+		lock->owner_cpu = cpu;
+		current->thread.smp_lock_count++;
+		current->thread.smp_lock_pc = ((unsigned int)caller);
+	}
+
+	put_cpu();
+
+	return val == 0;
+}
+
+void _do_spin_unlock(spinlock_t *lock)
+{
+	lock->owner_pc = 0;
+	lock->owner_cpu = NO_PROC_ID;
+	membar("#StoreStore | #LoadStore");
+	lock->lock = 0;
+	current->thread.smp_lock_count--;
+}
+
+/* Keep INIT_STUCK the same... */
+
+void _do_read_lock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+wlock_again:
+	/* Wait for any writer to go away.  */
+	while (((long)(rw->lock)) < 0) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_read(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		membar("#LoadLoad");
+	}
+	/* Try once to increment the counter.  */
+	__asm__ __volatile__(
+"	ldx		[%0], %%g1\n"
+"	brlz,a,pn	%%g1, 2f\n"
+"	 mov		1, %0\n"
+"	add		%%g1, 1, %%g7\n"
+"	casx		[%0], %%g1, %%g7\n"
+"	sub		%%g1, %%g7, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g1", "g7", "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (val)
+		goto wlock_again;
+	rw->reader_pc[cpu] = ((unsigned int)caller);
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+void _do_read_unlock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+
+	/* Drop our identity _first_. */
+	rw->reader_pc[cpu] = 0;
+	current->thread.smp_lock_count--;
+runlock_again:
+	/* Spin trying to decrement the counter using casx.  */
+	__asm__ __volatile__(
+"	membar	#StoreLoad | #LoadLoad\n"
+"	ldx	[%0], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	sub	%%g1, %%g7, %0\n"
+	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g1", "g7", "memory");
+	if (val) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_read(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto runlock_again;
+	}
+
+	put_cpu();
+}
+
+void _do_write_lock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+wlock_again:
+	/* Spin while there is another writer. */
+	while (((long)rw->lock) < 0) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		membar("#LoadLoad");
+	}
+
+	/* Try to acquire the write bit.  */
+	__asm__ __volatile__(
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	brlz,pn	%%g1, 1f\n"
+"	 or	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	ba,pt	%%xcc, 2f\n"
+"	 sub	%%g1, %%g7, %0\n"
+"1:	mov	1, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+	if (val) {
+		/* We couldn't get the write bit. */
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto wlock_again;
+	}
+	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
+		/* Readers still around, drop the write
+		 * lock, spin, and try again.
+		 */
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		__asm__ __volatile__(
+"		mov	1, %%g3\n"
+"		sllx	%%g3, 63, %%g3\n"
+"1:		ldx	[%0], %%g1\n"
+"		andn	%%g1, %%g3, %%g7\n"
+"		casx	[%0], %%g1, %%g7\n"
+"		cmp	%%g1, %%g7\n"
+"		bne,pn	%%xcc, 1b\n"
+"		 membar	#StoreLoad | #StoreStore"
+		: /* no outputs */
+		: "r" (&(rw->lock))
+		: "g3", "g1", "g7", "cc", "memory");
+		while(rw->lock != 0) {
+			if (!--stuck) {
+				if (shown++ <= 2)
+					show_write(str, rw, caller);
+				stuck = INIT_STUCK;
+			}
+			membar("#LoadLoad");
+		}
+		goto wlock_again;
+	}
+
+	/* We have it, say who we are. */
+	rw->writer_pc = ((unsigned int)caller);
+	rw->writer_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+void _do_write_unlock(rwlock_t *rw)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int shown = 0;
+
+	GET_CALLER(caller);
+
+	/* Drop our identity _first_ */
+	rw->writer_pc = 0;
+	rw->writer_cpu = NO_PROC_ID;
+	current->thread.smp_lock_count--;
+wlock_again:
+	__asm__ __volatile__(
+"	membar	#StoreLoad | #LoadLoad\n"
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	andn	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	sub	%%g1, %%g7, %0\n"
+	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+	if (val) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write("write_unlock", rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto wlock_again;
+	}
+}
+
+int _do_write_trylock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int cpu = get_cpu();
+
+	GET_CALLER(caller);
+
+	/* Try to acquire the write bit.  */
+	__asm__ __volatile__(
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	brlz,pn	%%g1, 1f\n"
+"	 or	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	ba,pt	%%xcc, 2f\n"
+"	 sub	%%g1, %%g7, %0\n"
+"1:	mov	1, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+
+	if (val) {
+		put_cpu();
+		return 0;
+	}
+
+	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
+		/* Readers still around, drop the write
+		 * lock, return failure.
+		 */
+		__asm__ __volatile__(
+"		mov	1, %%g3\n"
+"		sllx	%%g3, 63, %%g3\n"
+"1:		ldx	[%0], %%g1\n"
+"		andn	%%g1, %%g3, %%g7\n"
+"		casx	[%0], %%g1, %%g7\n"
+"		cmp	%%g1, %%g7\n"
+"		bne,pn	%%xcc, 1b\n"
+"		 membar	#StoreLoad | #StoreStore"
+		: /* no outputs */
+		: "r" (&(rw->lock))
+		: "g3", "g1", "g7", "cc", "memory");
+
+		put_cpu();
+
+		return 0;
+	}
+
+	/* We have it, say who we are. */
+	rw->writer_pc = ((unsigned int)caller);
+	rw->writer_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+
+	return 1;
+}
+
+#endif /* CONFIG_SMP */
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
new file mode 100644
index 0000000..7e6fdae
--- /dev/null
+++ b/arch/sparc64/lib/dec_and_lock.S
@@ -0,0 +1,78 @@
+/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
+ * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
+ *                 using cas and ldstub instructions.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+#include <linux/config.h>
+#include <asm/thread_info.h>
+
+	.text
+	.align	64
+
+	/* CAS basically works like this:
+	 *
+	 * void CAS(MEM, REG1, REG2)
+	 * {
+	 *   START_ATOMIC();
+	 *   if (*(MEM) == REG1) {
+	 *     TMP = *(MEM);
+	 *     *(MEM) = REG2;
+	 *     REG2 = TMP;
+	 *   } else
+	 *     REG2 = *(MEM);
+	 *   END_ATOMIC();
+	 * }
+	 */
+
+	.globl	_atomic_dec_and_lock
+_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
+loop1:	lduw	[%o0], %g2
+	subcc	%g2, 1, %g7
+	be,pn	%icc, start_to_zero
+	 nop
+nzero:	cas	[%o0], %g2, %g7
+	cmp	%g2, %g7
+	bne,pn	%icc, loop1
+	 mov	0, %g1
+
+out:
+	membar	#StoreLoad | #StoreStore
+	retl
+	 mov	%g1, %o0
+start_to_zero:
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %g3
+	add	%g3, 1, %g3
+	stw	%g3, [%g6 + TI_PRE_COUNT]
+#endif
+to_zero:
+	ldstub	[%o1], %g3
+	brnz,pn	%g3, spin_on_lock
+	 membar	#StoreLoad | #StoreStore
+loop2:	cas	[%o0], %g2, %g7		/* ASSERT(g7 == 0) */
+	cmp	%g2, %g7
+
+	be,pt	%icc, out
+	 mov	1, %g1
+	lduw	[%o0], %g2
+	subcc	%g2, 1, %g7
+	be,pn	%icc, loop2
+	 nop
+	membar	#StoreStore | #LoadStore
+	stb	%g0, [%o1]
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %g3
+	sub	%g3, 1, %g3
+	stw	%g3, [%g6 + TI_PRE_COUNT]
+#endif
+
+	b,pt	%xcc, nzero
+	 nop
+spin_on_lock:
+	ldub	[%o1], %g3
+	brnz,pt	%g3, spin_on_lock
+	 membar	#LoadLoad
+	ba,pt	%xcc, to_zero
+	 nop
+	nop
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
new file mode 100644
index 0000000..f6b4c78
--- /dev/null
+++ b/arch/sparc64/lib/delay.c
@@ -0,0 +1,49 @@
+/* delay.c: Delay loops for sparc64
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ *
+ * Based heavily upon x86 variant which is:
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <linux/delay.h>
+
+void __delay(unsigned long loops)
+{
+	__asm__ __volatile__(
+"	b,pt	%%xcc, 1f\n"
+"	 cmp	%0, 0\n"
+"	.align	32\n"
+"1:\n"
+"	bne,pt	%%xcc, 1b\n"
+"	 subcc	%0, 1, %0\n"
+	: "=&r" (loops)
+	: "0" (loops)
+	: "cc");
+}
+
+/* We used to multiply by HZ after shifting down by 32 bits
+ * but that runs into problems for higher values of HZ and
+ * slow cpus.
+ */
+void __const_udelay(unsigned long n)
+{
+	n *= 4;
+
+	n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
+	n >>= 32;
+
+	__delay(n + 1);
+}
+
+void __udelay(unsigned long n)
+{
+	__const_udelay(n * 0x10c7UL);
+}
+
+
+void __ndelay(unsigned long n)
+{
+	__const_udelay(n * 0x5UL);
+}
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
new file mode 100644
index 0000000..6059557
--- /dev/null
+++ b/arch/sparc64/lib/find_bit.c
@@ -0,0 +1,127 @@
+#include <linux/bitops.h>
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+				unsigned long offset)
+{
+	const unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < 64)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (64 - size));
+	if (tmp == 0UL)        /* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+/* find_next_zero_bit() finds the first zero bit in a bit string of length
+ * 'size' bits, starting the search at bit 'offset'. This is largely based
+ * on Linus's ALPHA routines, which are pretty portable BTW.
+ */
+
+unsigned long find_next_zero_bit(const unsigned long *addr,
+			unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (64-offset);
+		if (size < 64)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)        /* Are any bits zero? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+
+unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if(offset) {
+		tmp = __swab64p(p++);
+		tmp |= (~0UL >> (64-offset));
+		if(size < 64)
+			goto found_first;
+		if(~tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while(size & ~63) {
+		if(~(tmp = __swab64p(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if(!size)
+		return result;
+	tmp = __swab64p(p);
+found_first:
+	tmp |= (~0UL << size);
+	if (tmp == ~0UL)        /* Are any bits zero? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
new file mode 100644
index 0000000..ac556db
--- /dev/null
+++ b/arch/sparc64/lib/iomap.c
@@ -0,0 +1,48 @@
+/*
+ * Implement the sparc64 iomap interfaces
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *) (unsigned long) port;
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/sparc64/lib/ipcsum.S b/arch/sparc64/lib/ipcsum.S
new file mode 100644
index 0000000..58ca5b9
--- /dev/null
+++ b/arch/sparc64/lib/ipcsum.S
@@ -0,0 +1,34 @@
+	.text
+	.align	32
+	.globl	ip_fast_csum
+	.type	ip_fast_csum,#function
+ip_fast_csum:	/* %o0 = iph, %o1 = ihl */
+	sub	%o1, 4, %g7
+	lduw	[%o0 + 0x00], %o2
+	lduw	[%o0 + 0x04], %g2
+	lduw	[%o0 + 0x08], %g3
+	addcc	%g2, %o2, %o2
+	lduw	[%o0 + 0x0c], %g2
+	addccc	%g3, %o2, %o2
+	lduw	[%o0 + 0x10], %g3
+
+	addccc	%g2, %o2, %o2
+	addc	%o2, %g0, %o2
+1:	addcc	%g3, %o2, %o2
+	add	%o0, 4, %o0
+	addccc	%o2, %g0, %o2
+	subcc	%g7, 1, %g7
+	be,a,pt	%icc, 2f
+	 sll	%o2, 16, %g2
+
+	lduw	[%o0 + 0x10], %g3
+	ba,pt	%xcc, 1b
+	 nop
+2:	addcc	%o2, %g2, %g2
+	srl	%g2, 16, %o2
+	addc	%o2, %g0, %o2
+	xnor	%g0, %o2, %o2
+	set	0xffff, %o1
+	retl
+	 and	%o2, %o1, %o0
+	.size	ip_fast_csum, .-ip_fast_csum
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
new file mode 100644
index 0000000..2ef2e26
--- /dev/null
+++ b/arch/sparc64/lib/mcount.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
+ *
+ * This file implements mcount(), which is used to collect profiling data.
+ * This can also be tweaked for kernel stack overflow detection.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+
+/*
+ * This is the main variant and is called by C code.  GCC's -pg option
+ * automatically instruments every C function with a call to this.
+ */
+
+#ifdef CONFIG_STACK_DEBUG
+
+#define OVSTACKSIZE	4096		/* let's hope this is enough */
+
+	.data
+	.align		8
+panicstring:
+	.asciz		"Stack overflow\n"
+	.align		8
+ovstack:
+	.skip		OVSTACKSIZE
+#endif
+	.text
+	.align 32
+	.globl mcount, _mcount
+mcount:
+_mcount:
+#ifdef CONFIG_STACK_DEBUG
+	/*
+	 * Check whether %sp is dangerously low.
+	 */
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	srl		%g1, 1, %g3
+	add		%g3, 1, %g3
+	sllx		%g3, 8, %g3			! each fpregs frame is 256b
+	add		%g3, 192, %g3
+	add		%g6, %g3, %g3			! where does task_struct+frame end?
+	sub		%g3, STACK_BIAS, %g3
+	cmp		%sp, %g3
+	bg,pt		%xcc, 1f
+	 sethi		%hi(panicstring), %g3
+	sethi		%hi(ovstack), %g7		! can't move to panic stack fast enough
+	 or		%g7, %lo(ovstack), %g7
+	add		%g7, OVSTACKSIZE, %g7
+	sub		%g7, STACK_BIAS, %g7
+	mov		%g7, %sp
+	call		prom_printf
+	 or		%g3, %lo(panicstring), %o0
+	call		prom_halt
+	 nop
+#endif
+1:	retl
+	 nop
diff --git a/arch/sparc64/lib/memcmp.S b/arch/sparc64/lib/memcmp.S
new file mode 100644
index 0000000..c90ad96
--- /dev/null
+++ b/arch/sparc64/lib/memcmp.S
@@ -0,0 +1,28 @@
+/* $Id: memcmp.S,v 1.3 2000/03/23 07:51:08 davem Exp $
+ * Sparc64 optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+	.text
+	.align	32
+	.globl	__memcmp, memcmp
+__memcmp:
+memcmp:
+	cmp	%o2, 0		! IEU1	Group
+loop:	be,pn	%icc, ret_0	! CTI
+	 nop			! IEU0
+	ldub	[%o0], %g7	! LSU	Group
+	ldub	[%o1], %g3	! LSU	Group
+	sub	%o2, 1, %o2	! IEU0
+	add	%o0, 1, %o0	! IEU1
+	add	%o1, 1, %o1	! IEU0	Group
+	subcc	%g7, %g3, %g3	! IEU1	Group
+	be,pt	%icc, loop	! CTI
+	 cmp	%o2, 0		! IEU1	Group
+
+ret_n0:	retl
+	 mov	%g3, %o0
+ret_0:	retl
+	 mov	0, %o0
diff --git a/arch/sparc64/lib/memmove.S b/arch/sparc64/lib/memmove.S
new file mode 100644
index 0000000..9739580
--- /dev/null
+++ b/arch/sparc64/lib/memmove.S
@@ -0,0 +1,31 @@
+/* memmove.S: Simple memmove implementation.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+	.text
+	.align		32
+	.globl		memmove
+	.type		memmove,#function
+memmove:		/* o0=dst o1=src o2=len */
+	mov		%o0, %g1
+	cmp		%o0, %o1
+	bleu,pt		%xcc, memcpy
+	 add		%o1, %o2, %g7
+	cmp		%g7, %o0
+	bleu,pt		%xcc, memcpy
+	 add		%o0, %o2, %o5
+	sub		%g7, 1, %o1
+
+	sub		%o5, 1, %o0
+1:	ldub		[%o1], %g7
+	subcc		%o2, 1, %o2
+	sub		%o1, 1, %o1
+	stb		%g7, [%o0]
+	bne,pt		%icc, 1b
+	 sub		%o0, 1, %o0
+
+	retl
+	 mov		%g1, %o0
+	.size		memmove, .-memmove
diff --git a/arch/sparc64/lib/memscan.S b/arch/sparc64/lib/memscan.S
new file mode 100644
index 0000000..5e72d49
--- /dev/null
+++ b/arch/sparc64/lib/memscan.S
@@ -0,0 +1,129 @@
+/* $Id: memscan.S,v 1.3 2000/01/31 04:59:10 davem Exp $
+ * memscan.S: Optimized memscan for Sparc64.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ */
+
+#define HI_MAGIC	0x8080808080808080
+#define LO_MAGIC	0x0101010101010101
+#define ASI_PL		0x88
+
+	.text
+	.align	32
+	.globl		__memscan_zero, __memscan_generic
+	.globl		memscan
+
+__memscan_zero:
+	/* %o0 = bufp, %o1 = size */
+	brlez,pn	%o1, szzero
+	 andcc		%o0, 7, %g0
+	be,pt		%icc, we_are_aligned
+	 sethi		%hi(HI_MAGIC), %o4
+	ldub		[%o0], %o5
+1:	subcc		%o1, 1, %o1
+	brz,pn		%o5, 10f
+	 add		%o0, 1, %o0
+
+	be,pn		%xcc, szzero
+	 andcc		%o0, 7, %g0
+	bne,a,pn	%icc, 1b
+	 ldub		[%o0], %o5
+we_are_aligned:
+	ldxa		[%o0] ASI_PL, %o5
+	or		%o4, %lo(HI_MAGIC), %o3
+	sllx		%o3, 32, %o4
+	or		%o4, %o3, %o3
+
+	srlx		%o3, 7, %o2
+msloop:
+	sub		%o1, 8, %o1
+	add		%o0, 8, %o0
+	sub		%o5, %o2, %o4
+	xor		%o4, %o5, %o4
+	andcc		%o4, %o3, %g3
+	bne,pn		%xcc, check_bytes
+	 srlx		%o4, 32, %g3
+
+	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+check_bytes:
+	bne,a,pn	%icc, 2f
+	 andcc		%o5, 0xff, %g0
+	add		%o0, -5, %g2
+	ba,pt		%xcc, 3f
+	 srlx		%o5, 32, %g7
+
+2:	srlx		%o5, 8, %g7
+	be,pn		%icc, 1f
+	 add		%o0, -8, %g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g3, %o3, %g0
+
+	be,a,pn		%icc, 2f
+	 mov		%o0, %g2
+3:	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+2:	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+	inc		%g2
+1:	add		%o0, %o1, %o0
+	cmp		%g2, %o0
+	retl
+
+	 movle		%xcc, %g2, %o0
+10:	retl
+	 sub		%o0, 1, %o0
+szzero:	retl
+	 nop
+
+memscan:
+__memscan_generic:
+	/* %o0 = addr, %o1 = c, %o2 = size */
+	brz,pn		%o2, 3f
+	 add		%o0, %o2, %o3
+	ldub		[%o0], %o5
+	sub		%g0, %o2, %o4
+1:
+	cmp		%o5, %o1
+	be,pn		%icc, 2f
+	 addcc		%o4, 1, %o4
+	bne,a,pt 	%xcc, 1b
+	 ldub		[%o3 + %o4], %o5
+	retl
+	/* The delay slot is the same as the next insn, this is just to make it look more awful */
+2:
+	 add		%o3, %o4, %o0
+	retl
+	 sub		%o0, 1, %o0
+3:
+	retl
+	 nop
diff --git a/arch/sparc64/lib/rwsem.S b/arch/sparc64/lib/rwsem.S
new file mode 100644
index 0000000..174ff7b
--- /dev/null
+++ b/arch/sparc64/lib/rwsem.S
@@ -0,0 +1,165 @@
+/* rwsem.S: RW semaphore assembler.
+ *
+ * Written by David S. Miller (davem@redhat.com), 2001.
+ * Derived from asm-i386/rwsem.h
+ */
+
+#include <asm/rwsem-const.h>
+
+	.section	.sched.text
+
+	/* void __down_read(struct rw_semaphore *sem)
+	 *
+	 * Acquire for read: atomically increment the 32-bit counter at
+	 * [%o0] with a compare-and-swap loop.  A negative result means
+	 * a writer is active or queued, so fall into the C slow path
+	 * rwsem_down_read_failed().
+	 */
+	.globl		__down_read
+__down_read:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: reload and retry */
+	 add		%g7, 1, %g7		/* %g7 = new counter value */
+	cmp		%g7, 0
+	bl,pn		%icc, 3f		/* negative => contended, slow path */
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp		/* new window so %o0 survives the call */
+	call		rwsem_down_read_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_read, .-__down_read
+
+	/* int __down_read_trylock(struct rw_semaphore *sem)
+	 *
+	 * Single-shot reader acquire: give up (return 0) rather than
+	 * block if the incremented count would be negative.  %o1 holds
+	 * the pending return value; it is set in the delay slots.
+	 */
+	.globl		__down_read_trylock
+__down_read_trylock:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 2f		/* would go negative: fail with 0 */
+	 mov		0, %o1
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 mov		1, %o1
+	membar		#StoreLoad | #StoreStore
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_read_trylock, .-__down_read_trylock
+
+	/* void __down_write(struct rw_semaphore *sem)
+	 *
+	 * Acquire for write: atomically add RWSEM_ACTIVE_WRITE_BIAS to
+	 * the counter.  A non-zero previous value (%g7 holds the old
+	 * counter after the cas) means the sem was not free, so block
+	 * in rwsem_down_write_failed().
+	 */
+	.globl		__down_write
+__down_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 cmp		%g7, 0			/* test the OLD counter value */
+	bne,pn		%icc, 3f		/* sem was busy: slow path */
+	 membar		#StoreLoad | #StoreStore
+2:	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_down_write_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_write, .-__down_write
+
+	/* int __down_write_trylock(struct rw_semaphore *sem)
+	 *
+	 * Single-shot writer acquire: only attempt the cas if the
+	 * counter is currently 0 (sem completely free).  Returns 1 on
+	 * success, 0 otherwise, staged through %o1.
+	 */
+	.globl		__down_write_trylock
+__down_write_trylock:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	cmp		%g3, 0
+	bne,pn		%icc, 2f		/* sem busy: fail with 0 */
+	 mov		0, %o1
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 mov		1, %o1
+	membar		#StoreLoad | #StoreStore
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_write_trylock, .-__down_write_trylock
+
+	/* void __up_read(struct rw_semaphore *sem)
+	 *
+	 * Release a read hold: atomically decrement the counter.  If
+	 * the old value (%g7 after the cas) was negative, waiters are
+	 * queued; call rwsem_wake() once the active part of the new
+	 * count (RWSEM_ACTIVE_MASK bits) has dropped to zero.
+	 */
+	.globl		__up_read
+__up_read:
+1:
+	lduw		[%o0], %g1
+	sub		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 cmp		%g7, 0			/* test the OLD counter value */
+	bl,pn		%icc, 3f		/* was negative: waiters queued */
+	 membar		#StoreLoad | #StoreStore
+2:	retl
+	 nop
+3:	sethi		%hi(RWSEM_ACTIVE_MASK), %g1
+	sub		%g7, 1, %g7		/* %g7 = new counter value */
+	or		%g1, %lo(RWSEM_ACTIVE_MASK), %g1
+	andcc		%g7, %g1, %g0
+	bne,pn		%icc, 2b		/* still active holders: no wakeup */
+	 nop
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_read, .-__up_read
+
+	/* void __up_write(struct rw_semaphore *sem)
+	 *
+	 * Release a write hold: atomically subtract the writer bias.
+	 * A negative new count means waiters are queued, so wake them
+	 * via rwsem_wake().
+	 */
+	.globl		__up_write
+__up_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 sub		%g7, %g1, %g7		/* %g7 = new counter value */
+	cmp		%g7, 0
+	bl,pn		%icc, 3f		/* negative: waiters queued */
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_write, .-__up_write
+
+	/* void __downgrade_write(struct rw_semaphore *sem)
+	 *
+	 * Convert a held write lock into a read lock by atomically
+	 * subtracting RWSEM_WAITING_BIAS.  If the new count is negative
+	 * there are queued waiters, so take the slow path into
+	 * rwsem_downgrade_wake().
+	 */
+	.globl		__downgrade_write
+__downgrade_write:
+	sethi		%hi(RWSEM_WAITING_BIAS), %g1
+	or		%g1, %lo(RWSEM_WAITING_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b		/* cas lost the race: retry */
+	 sub		%g7, %g1, %g7		/* %g7 = new counter value */
+	cmp		%g7, 0
+	bl,pn		%icc, 3f		/* negative: wake pending waiters */
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_downgrade_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__downgrade_write, .-__downgrade_write
diff --git a/arch/sparc64/lib/strlen.S b/arch/sparc64/lib/strlen.S
new file mode 100644
index 0000000..e9ba192
--- /dev/null
+++ b/arch/sparc64/lib/strlen.S
@@ -0,0 +1,80 @@
+/* strlen.S: Sparc64 optimized strlen code
+ * Hand optimized from GNU libc's strlen
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+	/* size_t strlen(const char *str)
+	 *
+	 * Word-at-a-time scan.  First advance %o0 to 4-byte alignment a
+	 * byte at a time, returning 0/1/2 immediately (labels 11/12/13)
+	 * if the NUL appears that early.  Then test whole 32-bit words
+	 * with the classic (word - 0x01010101) & 0x80808080 zero-byte
+	 * filter; a hit may be a false positive for bytes >= 0x80, so
+	 * the candidate word is re-examined byte by byte and the scan
+	 * resumes if no real NUL was present.  Returns end - start.
+	 */
+	.align	32
+	.globl	strlen
+	.type	strlen,#function
+strlen:
+	mov	%o0, %o1		/* remember start for final subtraction */
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 11f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+	ldub	[%o0], %o5
+	brz,pn	%o5, 12f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 5f
+	 sethi	%hi(LO_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 13f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 8f
+	 or	%o4, %lo(LO_MAGIC), %o2
+9:
+	or	%o4, %lo(HI_MAGIC), %o3
+4:
+	sethi	%hi(LO_MAGIC), %o4
+5:
+	or	%o4, %lo(LO_MAGIC), %o2
+8:
+	ld	[%o0], %o5
+2:
+	sub	%o5, %o2, %o4		/* zero-byte filter: (w - LO) & HI */
+	andcc	%o4, %o3, %g0
+	be,pt	%icc, 8b		/* no candidate NUL: next word */
+	 add	%o0, 4, %o0
+
+	/* Check every byte. */
+	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -4, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,a,pt %icc, 2b		/* false positive: resume the word loop */
+	 ld	[%o0], %o5
+	add	%o4, 1, %o4
+1:
+	retl
+	 sub	%o4, %o1, %o0		/* length = NUL position - start */
+11:
+	retl
+	 mov	0, %o0
+12:
+	retl
+	 mov	1, %o0
+13:
+	retl
+	 mov	2, %o0
+
+	.size	strlen, .-strlen
diff --git a/arch/sparc64/lib/strlen_user.S b/arch/sparc64/lib/strlen_user.S
new file mode 100644
index 0000000..9ed54ba
--- /dev/null
+++ b/arch/sparc64/lib/strlen_user.S
@@ -0,0 +1,95 @@
+/* strlen_user.S: Sparc64 optimized strlen_user code
+ *
+ * Return length of string in userspace including terminating 0
+ * or 0 for error
+ *
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+	/* __strlen_user(str) / __strnlen_user(str, limit)
+	 *
+	 * Measure a user-space string through %asi, returning its length
+	 * INCLUDING the terminating NUL (see file header), or 0 on fault.
+	 * __strlen_user is __strnlen_user with a fixed 32768-byte limit.
+	 * %g1 = limit, %o1 = start.  Same word-at-a-time zero-byte trick
+	 * as strlen(); labels 21/22/23 handle a NUL found during the
+	 * initial alignment bytes (lengths 1/2/3).  Every user load is
+	 * covered by an __ex_table entry redirecting faults to 30:,
+	 * which returns 0.
+	 */
+	.align 4
+	.global __strlen_user, __strnlen_user
+__strlen_user:
+	sethi	%hi(32768), %o1		/* default limit for __strlen_user */
+__strnlen_user:	
+	mov	%o1, %g1
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+10:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 21f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+11:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 22f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 13f
+	 srl	%o3, 7, %o2		/* derive LO_MAGIC from HI_MAGIC */
+12:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 23f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 2f
+15:	 lda	[%o0] %asi, %o5
+9:	or	%o4, %lo(HI_MAGIC), %o3
+4:	srl	%o3, 7, %o2
+13:	lda	[%o0] %asi, %o5
+2:	sub	%o5, %o2, %o4		/* zero-byte filter: (w - LO) & HI */
+	andcc	%o4, %o3, %g0
+	bne,pn	%icc, 82f
+	 add	%o0, 4, %o0
+	sub	%o0, %o1, %g2		/* %g2 = bytes scanned so far */
+81:	cmp	%g2, %g1
+	blu,pt	%icc, 13b		/* still under the limit: next word */
+	 mov	%o0, %o4
+	ba,a,pt	%xcc, 1f
+
+	/* Check every byte. */
+82:	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -3, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,pt	%icc, 81b		/* false positive: resume the word loop */
+	 sub	%o0, %o1, %g2
+	add	%o4, 1, %o4
+1:	retl
+	 sub	%o4, %o1, %o0		/* length including the NUL */
+21:	retl
+	 mov	1, %o0
+22:	retl
+	 mov	2, %o0
+23:	retl
+	 mov	3, %o0
+
+	/* Fault handler: any user access fault lands here, return 0. */
+        .section .fixup,#alloc,#execinstr
+        .align  4
+30:
+        retl
+         clr    %o0
+
+	.section __ex_table,#alloc
+	.align	4
+
+	.word	10b, 30b
+	.word	11b, 30b
+	.word	12b, 30b
+	.word	15b, 30b
+	.word	13b, 30b
diff --git a/arch/sparc64/lib/strncmp.S b/arch/sparc64/lib/strncmp.S
new file mode 100644
index 0000000..6f14f53
--- /dev/null
+++ b/arch/sparc64/lib/strncmp.S
@@ -0,0 +1,32 @@
+/* $Id: strncmp.S,v 1.2 1997/03/11 17:51:44 jj Exp $
+ * Sparc64 optimized strncmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+	.text
+	/* int strncmp(const char *s1, const char *s2, size_t n)
+	 *
+	 * Byte-wise compare of at most n bytes.  Returns the difference
+	 * of the first mismatching byte pair, or 0 if the strings are
+	 * equal through a NUL or through n bytes.  n <= 0 (signed test
+	 * via brlez) returns 0 immediately.  The first s1 load uses the
+	 * no-fault ASI (ASI_PNF).
+	 */
+	.align	32
+	.globl	strncmp
+	.type	strncmp,#function
+strncmp:
+	brlez,pn %o2, 3f
+	 lduba	[%o0] (ASI_PNF), %o3
+1:
+	add	%o0, 1, %o0
+	ldub	[%o1], %o4
+	brz,pn	%o3, 2f			/* s1 byte is NUL: compare ends here */
+	 add	%o1, 1, %o1
+	cmp	%o3, %o4
+	bne,pn	%icc, 2f		/* mismatch */
+	 subcc	%o2, 1, %o2
+	bne,a,pt %xcc, 1b		/* count not exhausted: next byte */
+	 ldub	[%o0], %o3
+2:
+	retl
+	 sub	%o3, %o4, %o0
+3:
+	retl
+	 clr	%o0
+	.size	strncmp, .-strncmp
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
new file mode 100644
index 0000000..09cbbaa
--- /dev/null
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -0,0 +1,139 @@
+/* $Id: strncpy_from_user.S,v 1.6 1999/05/25 16:53:05 jj Exp $
+ * strncpy_from_user.S: Sparc64 strncpy from userspace.
+ *
+ *  Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/errno.h>
+
+	.data
+	.align	8
+0:	.xword	0x0101010101010101
+
+	.text
+	.align	32
+
+	/* Must return:
+	 *
+	 * -EFAULT		for an exception
+	 * count		if we hit the buffer limit
+	 * bytes copied		if we hit a null byte
+	 * (without the null byte)
+	 *
+	 * This implementation assumes:
+	 * %o1 is 8 aligned => !(%o2 & 7)
+	 * %o0 is 8 aligned (if not, it will be slooooow, but will work)
+	 *
+	 * This is optimized for the common case:
+	 * in my stats, 90% of src are 8 aligned (even on sparc32)
+	 * and average length is 18 or so.
+	 */
+
+	/* Fast path (src 8-aligned): copy 8 bytes per iteration, using
+	 * the 0x0101..01 constant at 0b and its <<7 form (0x8080..80)
+	 * as the word-at-a-time zero-byte filter.  On a candidate NUL
+	 * the word is narrowed down byte by byte; labels 50-57 each
+	 * compute the final length for one of the eight byte positions.
+	 * 30: is the byte-at-a-time fallback for unaligned src.  All
+	 * user loads (60-64) are covered by __ex_table entries that
+	 * return -EFAULT via the fixup at 4:.
+	 */
+	.globl	__strncpy_from_user
+	.type	__strncpy_from_user,#function
+__strncpy_from_user:
+	/* %o0=dest, %o1=src, %o2=count */
+	andcc	%o1, 7, %g0		! IEU1	Group
+	bne,pn	%icc, 30f		! CTI
+	 add	%o0, %o2, %g3		! IEU0
+60:	ldxa	[%o1] %asi, %g1		! Load	Group
+	brlez,pn %o2, 10f		! CTI
+	 mov	%o0, %o3		! IEU0
+50:	sethi	%hi(0b), %o4		! IEU0	Group
+	ldx	[%o4 + %lo(0b)], %o4	! Load
+	sllx	%o4, 7, %o5		! IEU1	Group
+1:	sub	%g1, %o4, %g2		! IEU0	Group
+	stx	%g1, [%o0]		! Store
+	add	%o0, 8, %o0		! IEU1
+	andcc	%g2, %o5, %g0		! IEU1	Group
+	bne,pn	%xcc, 5f		! CTI
+	 add	%o1, 8, %o1		! IEU0
+	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt %xcc, 1b		! CTI
+61:	 ldxa	[%o1] %asi, %g1		! Load
+10:	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+	/* Candidate NUL somewhere in the last stored doubleword. */
+5:	srlx	%g2, 32, %g7		! IEU0	Group
+	sethi	%hi(0xff00), %o4	! IEU1
+	andcc	%g7, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 or	%o4, %lo(0xff00), %o4	! IEU0
+	srlx	%g1, 48, %g7		! IEU0	Group
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 50f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 51f		! CTI
+	 srlx	%g1, 32, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 52f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 53f		! CTI
+2:	 andcc	%g2, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 srl	%g1, 16, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 54f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 55f		! CTI
+	 andcc	%g1, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 56f		! CTI
+	 andcc	%g1, 0xff, %g0		! IEU1	Group
+	be,a,pn	%icc, 57f		! CTI
+	 sub	%o0, %o3, %o0		! IEU0
+2:	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt	%xcc, 50b		! CTI
+62:	 ldxa	[%o1] %asi, %g1		! Load
+	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+	/* 50-57: NUL confirmed at byte k of the doubleword; return
+	 * (bytes stored) - (8 - k), i.e. length excluding the NUL. */
+50:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 8, %o0
+51:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 7, %o0
+52:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 6, %o0
+53:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 5, %o0
+54:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 4, %o0
+55:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 3, %o0
+56:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 2, %o0
+57:	retl
+	 sub	%o0, 1, %o0
+	/* Unaligned src: plain byte-at-a-time copy, negative index
+	 * %o3 counts up from -count to 0. */
+30:	brlez,pn %o2, 3f
+	 sub	%g0, %o2, %o3
+	add	%o0, %o2, %o0
+63:	lduba	[%o1] %asi, %o4
+1:	add	%o1, 1, %o1
+	brz,pn	%o4, 2f
+	 stb	%o4, [%o0 + %o3]
+	addcc	%o3, 1, %o3
+	bne,pt	%xcc, 1b
+64:	 lduba	[%o1] %asi, %o4
+3:	retl
+	 mov	%o2, %o0		/* hit the buffer limit: return count */
+2:	retl
+	 add	%o2, %o3, %o0		/* hit the NUL: return bytes copied */
+	.size	__strncpy_from_user, .-__strncpy_from_user
+
+	/* Fault on any user load: return -EFAULT. */
+	.section .fixup,#alloc,#execinstr
+	.align	4
+4:	retl
+	 mov	-EFAULT, %o0
+
+	.section __ex_table,#alloc
+	.align	4
+	.word	60b, 4b
+	.word	61b, 4b
+	.word	62b, 4b
+	.word	63b, 4b
+	.word	64b, 4b
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
new file mode 100644
index 0000000..0278e34
--- /dev/null
+++ b/arch/sparc64/lib/user_fixup.c
@@ -0,0 +1,71 @@
+/* user_fixup.c: Fix up user copy faults.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+/* Calculating the exact fault address when using
+ * block loads and stores can be very complicated.
+ * Instead of trying to be clever and handling all
+ * of the cases, just fix things up simply here.
+ */
+
+/*
+ * Slow byte-by-byte fallback run after an optimized copy_from_user
+ * faults.  Copies until the first faulting byte, zero-fills the rest
+ * of the destination, and returns the number of bytes NOT copied
+ * (0 on full success) — the standard copy_from_user contract.
+ */
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+	char *dst = to;
+	const char __user *src = from;
+
+	while (size) {
+		if (__get_user(*dst, src))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	/* Zero the un-copied tail of the destination buffer. */
+	if (size)
+		memset(dst, 0, size);
+
+	return size;
+}
+
+/*
+ * Slow byte-by-byte fallback run after an optimized copy_to_user
+ * faults.  Copies until the first byte whose store faults and returns
+ * the number of bytes NOT copied (0 on full success).
+ */
+unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
+{
+	char __user *dst = to;
+	const char *src = from;
+
+	while (size) {
+		if (__put_user(*src, dst))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	return size;
+}
+
+/*
+ * Slow fallback for user-to-user copies: bounce each byte through a
+ * kernel temporary, stopping at the first fault on either the load or
+ * the store side.  Returns the number of bytes NOT copied.
+ */
+unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
+{
+	char __user *dst = to;
+	char __user *src = from;
+
+	while (size) {
+		char tmp;
+
+		if (__get_user(tmp, src))
+			break;
+		if (__put_user(tmp, dst))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	return size;
+}
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
new file mode 100644
index 0000000..4cd5d2b
--- /dev/null
+++ b/arch/sparc64/lib/xor.S
@@ -0,0 +1,354 @@
+/*
+ * arch/sparc64/lib/xor.S
+ *
+ * High speed xor_block operation for RAID4/5 utilizing the
+ * UltraSparc Visual Instruction Set.
+ *
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+
+/*
+ *	Requirements:
+ *	!(((long)dest | (long)sourceN) & (64 - 1)) &&
+ *	!(len & 127) && len >= 256
+ */
+	.text
+	.align	32
+	/* xor_vis_2(bytes, p1, p2): p1[i] ^= p2[i] over %o0 bytes,
+	 * processing 128 bytes per loop iteration with VIS 64-byte
+	 * block loads/stores (ASI_BLK_P).  If the FPU is already in use
+	 * (FPRS_FEF or FPRS_DU set) its state is saved via VISenter
+	 * first.  The last 128 bytes are peeled out of the loop so the
+	 * final stores can be fenced with membar #Sync; %asi and %fprs
+	 * are restored before returning.
+	 */
+	.globl	xor_vis_2
+	.type	xor_vis_2,#function
+xor_vis_2:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7	/* save live FPU state */
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1			/* remember caller's %asi */
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 128, %o0			/* reserve the peeled tail */
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+2:	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	ldda	[%o1 + 128] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	add	%o1, 128, %o1
+	fxor	%f36, %f52, %f52
+	add	%o2, 128, %o2
+	fxor	%f38, %f54, %f54
+	subcc	%o0, 128, %o0
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 2b
+	 ldda	[%o2] %asi, %f16
+
+	/* Peeled final 128 bytes. */
+	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 + 64] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi			/* restore caller's %asi */
+	retl
+	  wr	%g0, 0, %fprs
+	.size	xor_vis_2, .-xor_vis_2
+
+	/* xor_vis_3(bytes, p1, p2, p3): p1[i] ^= p2[i] ^ p3[i] over %o0
+	 * bytes, 64 bytes per loop iteration using VIS block loads and
+	 * stores.  Same FPU/%asi save-restore protocol and peeled final
+	 * iteration as xor_vis_2 above.
+	 */
+	.globl	xor_vis_3
+	.type	xor_vis_3,#function
+xor_vis_3:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7	/* save live FPU state */
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1			/* remember caller's %asi */
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0			/* reserve the peeled tail */
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+3:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%o1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%o3, 64, %o3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 3b
+	 ldda	[%o2] %asi, %f16
+
+	/* Peeled final 64 bytes. */
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi			/* restore caller's %asi */
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_3, .-xor_vis_3
+
+	/* xor_vis_4(bytes, p1, p2, p3, p4): p1 ^= p2 ^ p3 ^ p4 over %o0
+	 * bytes, 64 bytes per loop iteration.  Same structure as
+	 * xor_vis_3 with one more source folded in.
+	 */
+	.globl	xor_vis_4
+	.type	xor_vis_4,#function
+xor_vis_4:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7	/* save live FPU state */
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1			/* remember caller's %asi */
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0			/* reserve the peeled tail */
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+4:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	add	%o3, 64, %o3
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	ldda	[%o1] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	add	%o4, 64, %o4
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 4b
+	 ldda	[%o2] %asi, %f16
+
+	/* Peeled final 64 bytes. */
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi			/* restore caller's %asi */
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_4, .-xor_vis_4
+
+	/* xor_vis_5(bytes, p1, p2, p3, p4, p5): p1 ^= p2^p3^p4^p5 over
+	 * the given byte count, 64 bytes per iteration.  Unlike the
+	 * 2/3/4-source versions this one opens a register window
+	 * (save/restore) so the five pointers arrive in %i1-%i5 and the
+	 * byte count in %i0; otherwise the same FPU/%asi protocol and
+	 * peeled final iteration as above.
+	 */
+	.globl	xor_vis_5
+	.type	xor_vis_5,#function
+xor_vis_5:
+	save	%sp, -192, %sp
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7	/* save live FPU state */
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1			/* remember caller's %asi */
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%i0, 64, %i0			/* reserve the peeled tail */
+	ldda	[%i1] %asi, %f0
+	ldda	[%i2] %asi, %f16
+
+5:	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%i1, 64, %i1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%i2, 64, %i2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%i3, 64, %i3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	add	%i4, 64, %i4
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	add	%i5, 64, %i5
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	ldda	[%i1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%i0, 64, %i0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1 - 64] %asi
+	bne,pt	%xcc, 5b
+	 ldda	[%i2] %asi, %f16
+
+	/* Peeled final 64 bytes. */
+	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi			/* restore caller's %asi */
+	wr	%g0, 0, %fprs
+	ret
+	 restore
+	.size	xor_vis_5, .-xor_vis_5
diff --git a/arch/sparc64/math-emu/Makefile b/arch/sparc64/math-emu/Makefile
new file mode 100644
index 0000000..a0b06fd
--- /dev/null
+++ b/arch/sparc64/math-emu/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the FPU instruction emulation.
+#
+
+obj-y    := math.o
+
+EXTRA_CFLAGS = -I. -Iinclude/math-emu -w
diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c
new file mode 100644
index 0000000..2ae05cd
--- /dev/null
+++ b/arch/sparc64/math-emu/math.c
@@ -0,0 +1,493 @@
+/* $Id: math.c,v 1.11 1999/12/20 05:02:25 davem Exp $
+ * arch/sparc64/math-emu/math.c
+ *
+ * Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ *
+ * Emulation routines originate from soft-fp package, which is part
+ * of glibc and has appropriate copyrights in it.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+#include <asm/fpumacro.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#include "sfp-util.h"
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+#include <math-emu/double.h>
+#include <math-emu/quad.h>
+
+/* QUAD - ftt == 3 */
+#define FMOVQ	0x003
+#define FNEGQ	0x007
+#define FABSQ	0x00b
+#define FSQRTQ	0x02b
+#define FADDQ	0x043
+#define FSUBQ	0x047
+#define FMULQ	0x04b
+#define FDIVQ	0x04f
+#define FDMULQ	0x06e
+#define FQTOX	0x083
+#define FXTOQ	0x08c
+#define FQTOS	0x0c7
+#define FQTOD	0x0cb
+#define FITOQ	0x0cc
+#define FSTOQ	0x0cd
+#define FDTOQ	0x0ce
+#define FQTOI	0x0d3
+/* SUBNORMAL - ftt == 2 */
+#define FSQRTS	0x029
+#define FSQRTD	0x02a
+#define FADDS	0x041
+#define FADDD	0x042
+#define FSUBS	0x045
+#define FSUBD	0x046
+#define FMULS	0x049
+#define FMULD	0x04a
+#define FDIVS	0x04d
+#define FDIVD	0x04e
+#define FSMULD	0x069
+#define FSTOX	0x081
+#define FDTOX	0x082
+#define FDTOS	0x0c6
+#define FSTOD	0x0c9
+#define FSTOI	0x0d1
+#define FDTOI	0x0d2
+#define FXTOS	0x084 /* Only Ultra-III generates this. */
+#define FXTOD	0x088 /* Only Ultra-III generates this. */
+#if 0	/* Optimized inline in sparc64/kernel/entry.S */
+#define FITOS	0x0c4 /* Only Ultra-III generates this. */
+#endif
+#define FITOD	0x0c8 /* Only Ultra-III generates this. */
+/* FPOP2 */
+#define FCMPQ	0x053
+#define FCMPEQ	0x057
+#define FMOVQ0	0x003
+#define FMOVQ1	0x043
+#define FMOVQ2	0x083
+#define FMOVQ3	0x0c3
+#define FMOVQI	0x103
+#define FMOVQX	0x183
+#define FMOVQZ	0x027
+#define FMOVQLE	0x047
+#define FMOVQLZ 0x067
+#define FMOVQNZ	0x0a7
+#define FMOVQGZ	0x0c7
+#define FMOVQGE 0x0e7
+
+#define FSR_TEM_SHIFT	23UL
+#define FSR_TEM_MASK	(0x1fUL << FSR_TEM_SHIFT)
+#define FSR_AEXC_SHIFT	5UL
+#define FSR_AEXC_MASK	(0x1fUL << FSR_AEXC_SHIFT)
+#define FSR_CEXC_SHIFT	0UL
+#define FSR_CEXC_MASK	(0x1fUL << FSR_CEXC_SHIFT)
+
+/* All routines returning an exception to raise should detect
+ * such exceptions _before_ rounding to be consistent with
+ * the behavior of the hardware in the implemented cases
+ * (and thus with the recommendations in the V9 architecture
+ * manual).
+ *
+ * We return 0 if a SIGFPE should be sent, 1 otherwise.
+ */
+/* Fold the soft-fp exception flags in 'eflag' into the thread's saved
+ * %fsr image (CEXC/AEXC/ftt fields) and decide whether the caller must
+ * deliver SIGFPE.  Returns 0 when an enabled (TEM) trap fired and a
+ * signal is due, 1 when emulation may simply continue; in the latter
+ * case the PC is also advanced past the emulated instruction.
+ */
+static inline int record_exception(struct pt_regs *regs, int eflag)
+{
+	u64 fsr = current_thread_info()->xfsr[0];
+	int would_trap;
+
+	/* Determine if this exception would have generated a trap. */
+	would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
+
+	/* If trapping, we only want to signal one bit. */
+	if(would_trap != 0) {
+		eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
+		if((eflag & (eflag - 1)) != 0) {
+			/* More than one bit still set: pick a single one
+			 * in priority order, highest first.
+			 */
+			if(eflag & FP_EX_INVALID)
+				eflag = FP_EX_INVALID;
+			else if(eflag & FP_EX_OVERFLOW)
+				eflag = FP_EX_OVERFLOW;
+			else if(eflag & FP_EX_UNDERFLOW)
+				eflag = FP_EX_UNDERFLOW;
+			else if(eflag & FP_EX_DIVZERO)
+				eflag = FP_EX_DIVZERO;
+			else if(eflag & FP_EX_INEXACT)
+				eflag = FP_EX_INEXACT;
+		}
+	}
+
+	/* Set CEXC, here is the rule:
+	 *
+	 *    In general all FPU ops will set one and only one
+	 *    bit in the CEXC field, this is always the case
+	 *    when the IEEE exception trap is enabled in TEM.
+	 */
+	fsr &= ~(FSR_CEXC_MASK);
+	fsr |= ((long)eflag << FSR_CEXC_SHIFT);
+
+	/* Set the AEXC field, rule is:
+	 *
+	 *    If a trap would not be generated, the
+	 *    CEXC just generated is OR'd into the
+	 *    existing value of AEXC.
+	 */
+	if(would_trap == 0)
+		fsr |= ((long)eflag << FSR_AEXC_SHIFT);
+
+	/* If trapping, indicate fault trap type IEEE. */
+	if(would_trap != 0)
+		fsr |= (1UL << 14);
+
+	current_thread_info()->xfsr[0] = fsr;
+
+	/* If we will not trap, advance the program counter over
+	 * the instruction being handled.
+	 */
+	if(would_trap == 0) {
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	}
+
+	return (would_trap ? 0 : 1);
+}
+
+typedef union {
+	u32 s;
+	u64 d;
+	u64 q[2];
+} *argp;
+
+int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+{
+	unsigned long pc = regs->tpc;
+	unsigned long tstate = regs->tstate;
+	u32 insn = 0;
+	int type = 0;
+	/* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u arg tells
+	   whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
+	   non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */
+#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
+	int freg;
+	static u64 zero[2] = { 0L, 0L };
+	int flags;
+	FP_DECL_EX;
+	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
+	int IR;
+	long XR, xfsr;
+
+	if (tstate & TSTATE_PRIV)
+		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
+	if (test_thread_flag(TIF_32BIT))
+		pc = (u32)pc;
+	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+		if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
+			switch ((insn >> 5) & 0x1ff) {
+			/* QUAD - ftt == 3 */
+			case FMOVQ:
+			case FNEGQ:
+			case FABSQ: TYPE(3,3,0,3,0,0,0); break;
+			case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
+			case FADDQ:
+			case FSUBQ:
+			case FMULQ:
+			case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
+			case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
+			case FQTOX: TYPE(3,2,0,3,1,0,0); break;
+			case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
+			case FQTOS: TYPE(3,1,1,3,1,0,0); break;
+			case FQTOD: TYPE(3,2,1,3,1,0,0); break;
+			case FITOQ: TYPE(3,3,1,1,0,0,0); break;
+			case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
+			case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
+			case FQTOI: TYPE(3,1,0,3,1,0,0); break;
+			/* SUBNORMAL - ftt == 2 */
+			case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
+			case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
+			case FADDD:
+			case FSUBD:
+			case FMULD:
+			case FDIVD: TYPE(2,2,1,2,1,2,1); break;
+			case FADDS:
+			case FSUBS:
+			case FMULS:
+			case FDIVS: TYPE(2,1,1,1,1,1,1); break;
+			case FSMULD: TYPE(2,2,1,1,1,1,1); break;
+			case FSTOX: TYPE(2,2,0,1,1,0,0); break;
+			case FDTOX: TYPE(2,2,0,2,1,0,0); break;
+			case FDTOS: TYPE(2,1,1,2,1,0,0); break;
+			case FSTOD: TYPE(2,2,1,1,1,0,0); break;
+			case FSTOI: TYPE(2,1,0,1,1,0,0); break;
+			case FDTOI: TYPE(2,1,0,2,1,0,0); break;
+
+			/* Only Ultra-III generates these */
+			case FXTOS: TYPE(2,1,1,2,0,0,0); break;
+			case FXTOD: TYPE(2,2,1,2,0,0,0); break;
+#if 0			/* Optimized inline in sparc64/kernel/entry.S */
+			case FITOS: TYPE(2,1,1,1,0,0,0); break;
+#endif
+			case FITOD: TYPE(2,2,1,1,0,0,0); break;
+			}
+		}
+		else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
+			IR = 2;
+			switch ((insn >> 5) & 0x1ff) {
+			case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
+			case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
+			/* Now the conditional fmovq support */
+			case FMOVQ0:
+			case FMOVQ1:
+			case FMOVQ2:
+			case FMOVQ3:
+				/* fmovq %fccX, %fY, %fZ */
+				if (!((insn >> 11) & 3))
+					XR = current_thread_info()->xfsr[0] >> 10;
+				else
+					XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
+				XR &= 3;
+				IR = 0;
+				switch ((insn >> 14) & 0x7) {
+				/* case 0: IR = 0; break; */			/* Never */
+				case 1: if (XR) IR = 1; break;			/* Not Equal */
+				case 2: if (XR == 1 || XR == 2) IR = 1; break;	/* Less or Greater */
+				case 3: if (XR & 1) IR = 1; break;		/* Unordered or Less */
+				case 4: if (XR == 1) IR = 1; break;		/* Less */
+				case 5: if (XR & 2) IR = 1; break;		/* Unordered or Greater */
+				case 6: if (XR == 2) IR = 1; break;		/* Greater */
+				case 7: if (XR == 3) IR = 1; break;		/* Unordered */
+				}
+				if ((insn >> 14) & 8)
+					IR ^= 1;
+				break;
+			case FMOVQI:
+			case FMOVQX:
+				/* fmovq %[ix]cc, %fY, %fZ */
+				XR = regs->tstate >> 32;
+				if ((insn >> 5) & 0x80)
+					XR >>= 4;
+				XR &= 0xf;
+				IR = 0;
+				freg = ((XR >> 2) ^ XR) & 2;
+				switch ((insn >> 14) & 0x7) {
+				/* case 0: IR = 0; break; */			/* Never */
+				case 1: if (XR & 4) IR = 1; break;		/* Equal */
+				case 2: if ((XR & 4) || freg) IR = 1; break;	/* Less or Equal */
+				case 3: if (freg) IR = 1; break;		/* Less */
+				case 4: if (XR & 5) IR = 1; break;		/* Less or Equal Unsigned */
+				case 5: if (XR & 1) IR = 1; break;		/* Carry Set */
+				case 6: if (XR & 8) IR = 1; break;		/* Negative */
+				case 7: if (XR & 2) IR = 1; break;		/* Overflow Set */
+				}
+				if ((insn >> 14) & 8)
+					IR ^= 1;
+				break;
+			case FMOVQZ:
+			case FMOVQLE:
+			case FMOVQLZ:
+			case FMOVQNZ:
+			case FMOVQGZ:
+			case FMOVQGE:
+				freg = (insn >> 14) & 0x1f;
+				if (!freg)
+					XR = 0;
+				else if (freg < 16)
+					XR = regs->u_regs[freg];
+				else if (test_thread_flag(TIF_32BIT)) {
+					struct reg_window32 __user *win32;
+					flushw_user ();
+					win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+					get_user(XR, &win32->locals[freg - 16]);
+				} else {
+					struct reg_window __user *win;
+					flushw_user ();
+					win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+					get_user(XR, &win->locals[freg - 16]);
+				}
+				IR = 0;
+				switch ((insn >> 10) & 3) {
+				case 1: if (!XR) IR = 1; break;			/* Register Zero */
+				case 2: if (XR <= 0) IR = 1; break;		/* Register Less Than or Equal to Zero */
+				case 3: if (XR < 0) IR = 1; break;		/* Register Less Than Zero */
+				}
+				if ((insn >> 10) & 4)
+					IR ^= 1;
+				break;
+			}
+			if (IR == 0) {
+				/* The fmov test was false. Do a nop instead */
+				current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
+				regs->tpc = regs->tnpc;
+				regs->tnpc += 4;
+				return 1;
+			} else if (IR == 1) {
+				/* Change the instruction into plain fmovq */
+				insn = (insn & 0x3e00001f) | 0x81a00060;
+				TYPE(3,3,0,3,0,0,0); 
+			}
+		}
+	}
+	if (type) {
+		argp rs1 = NULL, rs2 = NULL, rd = NULL;
+		
+		freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
+		if (freg != (type >> 9))
+			goto err;
+		current_thread_info()->xfsr[0] &= ~0x1c000;
+		freg = ((insn >> 14) & 0x1f);
+		switch (type & 0x3) {
+		case 3: if (freg & 2) {
+				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+				goto err;
+			}
+		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
+		case 1: rs1 = (argp)&f->regs[freg];
+			flags = (freg < 32) ? FPRS_DL : FPRS_DU; 
+			if (!(current_thread_info()->fpsaved[0] & flags))
+				rs1 = (argp)&zero;
+			break;
+		}
+		switch (type & 0x7) {
+		case 7: FP_UNPACK_QP (QA, rs1); break;
+		case 6: FP_UNPACK_DP (DA, rs1); break;
+		case 5: FP_UNPACK_SP (SA, rs1); break;
+		}
+		freg = (insn & 0x1f);
+		switch ((type >> 3) & 0x3) {
+		case 3: if (freg & 2) {
+				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+				goto err;
+			}
+		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
+		case 1: rs2 = (argp)&f->regs[freg];
+			flags = (freg < 32) ? FPRS_DL : FPRS_DU; 
+			if (!(current_thread_info()->fpsaved[0] & flags))
+				rs2 = (argp)&zero;
+			break;
+		}
+		switch ((type >> 3) & 0x7) {
+		case 7: FP_UNPACK_QP (QB, rs2); break;
+		case 6: FP_UNPACK_DP (DB, rs2); break;
+		case 5: FP_UNPACK_SP (SB, rs2); break;
+		}
+		freg = ((insn >> 25) & 0x1f);
+		switch ((type >> 6) & 0x3) {
+		case 3: if (freg & 2) {
+				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+				goto err;
+			}
+		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
+		case 1: rd = (argp)&f->regs[freg];
+			flags = (freg < 32) ? FPRS_DL : FPRS_DU; 
+			if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
+				current_thread_info()->fpsaved[0] = FPRS_FEF;
+				current_thread_info()->gsr[0] = 0;
+			}
+			if (!(current_thread_info()->fpsaved[0] & flags)) {
+				if (freg < 32)
+					memset(f->regs, 0, 32*sizeof(u32));
+				else
+					memset(f->regs+32, 0, 32*sizeof(u32));
+			}
+			current_thread_info()->fpsaved[0] |= flags;
+			break;
+		}
+		switch ((insn >> 5) & 0x1ff) {
+		/* + */
+		case FADDS: FP_ADD_S (SR, SA, SB); break;
+		case FADDD: FP_ADD_D (DR, DA, DB); break;
+		case FADDQ: FP_ADD_Q (QR, QA, QB); break;
+		/* - */
+		case FSUBS: FP_SUB_S (SR, SA, SB); break;
+		case FSUBD: FP_SUB_D (DR, DA, DB); break;
+		case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
+		/* * */
+		case FMULS: FP_MUL_S (SR, SA, SB); break;
+		case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
+			     FP_CONV (D, S, 1, 1, DB, SB);
+		case FMULD: FP_MUL_D (DR, DA, DB); break;
+		case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
+			     FP_CONV (Q, D, 2, 1, QB, DB);
+		case FMULQ: FP_MUL_Q (QR, QA, QB); break;
+		/* / */
+		case FDIVS: FP_DIV_S (SR, SA, SB); break;
+		case FDIVD: FP_DIV_D (DR, DA, DB); break;
+		case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
+		/* sqrt */
+		case FSQRTS: FP_SQRT_S (SR, SB); break;
+		case FSQRTD: FP_SQRT_D (DR, DB); break;
+		case FSQRTQ: FP_SQRT_Q (QR, QB); break;
+		/* mov */
+		case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
+		case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
+		case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
+		/* float to int */
+		case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
+		case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
+		case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
+		case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
+		case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
+		case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
+		/* int to float */
+		case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
+		case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
+		/* Only Ultra-III generates these */
+		case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
+		case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
+#if 0		/* Optimized inline in sparc64/kernel/entry.S */
+		case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
+#endif
+		case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
+		/* float to float */
+		case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
+		case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
+		case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
+		case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
+		case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
+		case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
+		/* comparison */
+		case FCMPQ:
+		case FCMPEQ:
+			FP_CMP_Q(XR, QB, QA, 3);
+			if (XR == 3 &&
+			    (((insn >> 5) & 0x1ff) == FCMPEQ ||
+			     FP_ISSIGNAN_Q(QA) ||
+			     FP_ISSIGNAN_Q(QB)))
+				FP_SET_EXCEPTION (FP_EX_INVALID);
+		}
+		if (!FP_INHIBIT_RESULTS) {
+			switch ((type >> 6) & 0x7) {
+			case 0: xfsr = current_thread_info()->xfsr[0];
+				if (XR == -1) XR = 2;
+				switch (freg & 3) {
+				/* fcc0, 1, 2, 3 */
+				case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
+				case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
+				case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
+				case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
+				}
+				current_thread_info()->xfsr[0] = xfsr;
+				break;
+			case 1: rd->s = IR; break;
+			case 2: rd->d = XR; break;
+			case 5: FP_PACK_SP (rd, SR); break;
+			case 6: FP_PACK_DP (rd, DR); break;
+			case 7: FP_PACK_QP (rd, QR); break;
+			}
+		}
+
+		if(_fex != 0)
+			return record_exception(regs, _fex);
+
+		/* Success and no exceptions detected. */
+		current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+		return 1;
+	}
+err:	return 0;
+}
diff --git a/arch/sparc64/math-emu/sfp-util.h b/arch/sparc64/math-emu/sfp-util.h
new file mode 100644
index 0000000..31e4747
--- /dev/null
+++ b/arch/sparc64/math-emu/sfp-util.h
@@ -0,0 +1,120 @@
+/* $Id: sfp-util.h,v 1.5 2001/06/10 06:48:46 davem Exp $
+ * arch/sparc64/math-emu/sfp-util.h
+ *
+ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
/*
 * add_ssaaaa(sh, sl, ah, al, bh, bl):
 *	128-bit unsigned add used by the soft-fp core:
 *	(sh:sl) = (ah:al) + (bh:bl).
 *	The low words are added with "addcc" so the carry lands in %xcc,
 *	and an annulled branch-on-carry-set then bumps the high word by
 *	one if needed.  Clobbers the condition codes only.
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) 	\
  __asm__ ("addcc %4,%5,%1\n\t"			\
	   "add %2,%3,%0\n\t"			\
  	   "bcs,a,pn %%xcc, 1f\n\t"		\
  	   "add %0, 1, %0\n"			\
  	   "1:"					\
	   : "=r" ((UDItype)(sh)),		\
	     "=&r" ((UDItype)(sl))		\
	   : "r" ((UDItype)(ah)),		\
	     "r" ((UDItype)(bh)),		\
	     "r" ((UDItype)(al)),		\
	     "r" ((UDItype)(bl))		\
	   : "cc")
+	   
/*
 * sub_ddmmss(sh, sl, ah, al, bh, bl):
 *	128-bit unsigned subtract, mirror image of add_ssaaaa:
 *	(sh:sl) = (ah:al) - (bh:bl).
 *	"subcc" on the low words sets the borrow in %xcc; the annulled
 *	branch subtracts it from the high word.  Clobbers condition
 *	codes only.
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) 	\
  __asm__ ("subcc %4,%5,%1\n\t"			\
  	   "sub %2,%3,%0\n\t"			\
  	   "bcs,a,pn %%xcc, 1f\n\t"		\
  	   "sub %0, 1, %0\n"			\
  	   "1:"					\
	   : "=r" ((UDItype)(sh)),		\
	     "=&r" ((UDItype)(sl))		\
	   : "r" ((UDItype)(ah)),		\
	     "r" ((UDItype)(bh)),		\
	     "r" ((UDItype)(al)),		\
	     "r" ((UDItype)(bl))		\
	   : "cc")
+
/*
 * umul_ppmm(wh, wl, u, v):
 *	Full 64x64 -> 128-bit unsigned multiply: (wh:wl) = u * v.
 *	Built out of four 32x32->64 "mulx" partial products, since
 *	UltraSPARC has no 128-bit multiply.  The "srl reg,0,dst" forms
 *	zero-extend the low 32 bits of a register.
 *	NOTE(review): the sethi/movcc sequence folds the carry out of
 *	the cross-product additions into the high word — assumed
 *	correct per the GMP longlong.h lineage of this code; the exact
 *	carry bookkeeping has not been re-derived here.
 */
#define umul_ppmm(wh, wl, u, v)				\
  do {							\
	  UDItype tmp1, tmp2, tmp3, tmp4;		\
	  __asm__ __volatile__ (			\
		   "srl %7,0,%3\n\t"			\
		   "mulx %3,%6,%1\n\t"			\
		   "srlx %6,32,%2\n\t"			\
		   "mulx %2,%3,%4\n\t"			\
		   "sllx %4,32,%5\n\t"			\
		   "srl %6,0,%3\n\t"			\
		   "sub %1,%5,%5\n\t"			\
		   "srlx %5,32,%5\n\t"			\
		   "addcc %4,%5,%4\n\t"			\
		   "srlx %7,32,%5\n\t"			\
		   "mulx %3,%5,%3\n\t"			\
		   "mulx %2,%5,%5\n\t"			\
		   "sethi %%hi(0x80000000),%2\n\t"	\
		   "addcc %4,%3,%4\n\t"			\
		   "srlx %4,32,%4\n\t"			\
		   "add %2,%2,%2\n\t"			\
		   "movcc %%xcc,%%g0,%2\n\t"		\
		   "addcc %5,%4,%5\n\t"			\
		   "sllx %3,32,%3\n\t"			\
		   "add %1,%3,%1\n\t"			\
		   "add %5,%2,%0"			\
	   : "=r" ((UDItype)(wh)),			\
	     "=&r" ((UDItype)(wl)),			\
	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
	   : "r" ((UDItype)(u)),			\
	     "r" ((UDItype)(v))				\
	   : "cc");					\
  } while (0)
+  
/*
 * udiv_qrnnd(q, r, n1, n0, d):
 *	Divide the 128-bit value (n1:n0) by d, yielding 64-bit quotient
 *	q and remainder r.  Classic schoolbook long division in two
 *	32-bit digit steps, adapted from GMP's longlong.h generic C
 *	version.  Because UDIV_NEEDS_NORMALIZATION is set below, the
 *	caller must pre-shift d so its top bit is set (and shift the
 *	remainder back afterwards).
 */
#define udiv_qrnnd(q, r, n1, n0, d) 			\
  do {                                                  \
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;     \
    __d1 = (d >> 32);                                   \
    __d0 = (USItype)d;                                  \
                                                        \
    __r1 = (n1) % __d1;                                 \
    __q1 = (n1) / __d1;                                 \
    __m = (UWtype) __q1 * __d0;                         \
    __r1 = (__r1 << 32) | (n0 >> 32);                   \
    if (__r1 < __m)                                     \
      {                                                 \
        __q1--, __r1 += (d);                            \
        if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
          if (__r1 < __m)                               \
            __q1--, __r1 += (d);                        \
      }                                                 \
    __r1 -= __m;                                        \
                                                        \
    __r0 = __r1 % __d1;                                 \
    __q0 = __r1 / __d1;                                 \
    __m = (UWtype) __q0 * __d0;                         \
    __r0 = (__r0 << 32) | ((USItype)n0);                \
    if (__r0 < __m)                                     \
      {                                                 \
        __q0--, __r0 += (d);                            \
        if (__r0 >= (d))                                \
          if (__r0 < __m)                               \
            __q0--, __r0 += (d);                        \
      }                                                 \
    __r0 -= __m;                                        \
                                                        \
    (q) = (UWtype) (__q1 << 32)  | __q0;                \
    (r) = __r0;                                         \
  } while (0)
+
/* Tell the generic soft-fp code that udiv_qrnnd above requires a
 * normalized divisor (top bit set).
 */
#define UDIV_NEEDS_NORMALIZATION 1  

/* The soft-fp core calls abort() on "can't happen" conditions; in the
 * kernel we just bail out of the emulation function with 0 (failure).
 * Only safe because this header is included inside functions returning
 * int.
 */
#define abort() \
	return 0

/* Map the kernel's byteorder define onto the names soft-fp expects. */
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
new file mode 100644
index 0000000..cda8733
--- /dev/null
+++ b/arch/sparc64/mm/Makefile
@@ -0,0 +1,10 @@
# $Id: Makefile,v 1.8 2000/12/14 22:57:25 davem Exp $
# Makefile for the linux Sparc64-specific parts of the memory manager.
#

# Assembly is built in strict -ansi mode; all C warnings are fatal.
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror

# Core sparc64 MM objects: TLB flush helpers, fault handling,
# bootmem/init, generic remap helpers and the exception table walker.
obj-y    := ultra.o tlb.o fault.o init.o generic.o extable.o

# Huge page (superpage) support is optional.
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
new file mode 100644
index 0000000..ec33429
--- /dev/null
+++ b/arch/sparc64/mm/extable.c
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/sparc64/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
/* Intentionally a no-op on sparc64: range entries (fixup == 0) encode
 * meaning in the adjacency and order of consecutive table words, so
 * the table must not be reordered at runtime.  NOTE(review): this
 * assumes the table is already emitted in a searchable order at build
 * time — confirm against the linker script / build rules.
 */
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}
+
/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *start,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *walk;

	/* Single insn entries are encoded as:
	 *	word 1:	insn address
	 *	word 2:	fixup code address
	 *
	 * Range entries are encoded as:
	 *	word 1: first insn address
	 *	word 2: 0
	 *	word 3: last insn address + 4 bytes
	 *	word 4: fixup code address
	 *
	 * See asm/uaccess.h for more details.
	 */

	/* 1. Try to find an exact match.  Exact matches take priority
	 * over range matches, hence two linear passes.
	 */
	for (walk = start; walk <= last; walk++) {
		if (walk->fixup == 0) {
			/* A range entry, skip both parts.  Combined with
			 * the loop's walk++ this advances by two entries.
			 */
			walk++;
			continue;
		}

		if (walk->insn == value)
			return walk;
	}

	/* 2. Try to find a range match.  Stop at (last - 1) because a
	 * range entry occupies two consecutive table slots.
	 */
	for (walk = start; walk <= (last - 1); walk++) {
		if (walk->fixup)
			continue;

		/* walk[0].insn is the first insn, walk[1].insn is one
		 * past the last insn of the range (hence strict >).
		 */
		if (walk[0].insn <= value && walk[1].insn > value)
			return walk;

		walk++;
	}

        return NULL;
}
+
/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(addr);
	if (!entry)
		return 0;

	/* Inside range?  Fix g2 and return correct fixup.  For a range
	 * entry (fixup == 0), *g2 is set to the instruction's offset
	 * (in words) from the start of the range, and the fixup address
	 * lives in the second half of the two-slot entry.
	 */
	if (!entry->fixup) {
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}

	return entry->fixup;
}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
new file mode 100644
index 0000000..3ffee7b
--- /dev/null
+++ b/arch/sparc64/mm/fault.c
@@ -0,0 +1,527 @@
+/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/asi.h>
+#include <asm/lsu.h>
+#include <asm/sections.h>
+#include <asm/kdebug.h>
+
+#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
+
+extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+/*
+ * To debug kernel during syscall entry.
+ */
+void syscall_trace_entry(struct pt_regs *regs)
+{
+	printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
+}
+
+/*
+ * To debug kernel during syscall exit.
+ */
+void syscall_trace_exit(struct pt_regs *regs)
+{
+	printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
+}
+
+/*
+ * To debug kernel to catch accesses to certain virtual/physical addresses.
+ * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
+ * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses.
+ * Caller passes in a 64bit aligned addr, with mask set to the bytes that need to be
+ * watched. This is only useful on a single cpu machine for now. After the watchpoint
+ * is detected, the process causing it will be killed, thus preventing an infinite loop.
+ */
void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
{
	unsigned long lsubits;

	/* Read the current LSU control register and clear all existing
	 * physical/virtual watchpoint enable bits.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (lsubits)
			     : "i" (ASI_LSU_CONTROL));
	lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
		     LSU_CONTROL_PR | LSU_CONTROL_VR |
		     LSU_CONTROL_PW | LSU_CONTROL_VW);

	/* Program the watchpoint address register (virtual or physical,
	 * depending on mode) via the DMMU ASI.
	 */
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
			       "i" (ASI_DMMU));

	/* Byte-mask field position: bit 25 for virtual, 33 for physical
	 * watchpoints.  NOTE(review): positions assumed to match the
	 * UltraSPARC LSU control register layout — confirm against the
	 * CPU manual / asm/lsu.h.
	 */
	lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
	if (flags & VM_READ)
		lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
	if (flags & VM_WRITE)
		lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
	/* Write back the control register with the new watchpoint armed. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (lsubits), "i" (ASI_LSU_CONTROL)
			     : "memory");
}
+
+/* Nice, simple, prom library does all the sweating for us. ;) */
/* Walk the PROM's list of available physical memory, filling in the
 * sp_banks[] table and returning the total number of bytes found.
 * Bank sizes are truncated to page granularity.
 */
unsigned long __init prom_probe_memory (void)
{
	register struct linux_mlist_p1275 *mlist;
	register unsigned long bytes, base_paddr, tally;
	register int i;

	i = 0;
	mlist = *prom_meminfo()->p1275_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = mlist->start_adr;
  
	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		/* Clamp if the PROM reports more banks than we can
		 * record; extra banks are dropped (but still counted
		 * in tally above).
		 */
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}
    
		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Terminate the table with a sentinel entry.  NOTE(review): if
	 * the clamp above fired, this writes index SPARC_PHYS_BANKS,
	 * which is out of bounds per the extern declaration above —
	 * verify sp_banks' real definition has a spare slot.
	 */
	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}
+
/* Print diagnostics for a kernel fault nobody could fix up, notify any
 * die-chain listeners, and oops if we were in kernel mode.
 */
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		/* Faults in the first page are almost always NULL
		 * pointer dereferences (plus small struct offsets).
		 */
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	/* tsk->mm is NULL for kernel threads; fall back to active_mm. */
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ?
		CTX_HWBITS(tsk->mm->context) :
		CTX_HWBITS(tsk->active_mm->context)));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		          (unsigned long) tsk->active_mm->pgd));
	if (notify_die(DIE_GPF, "general protection fault", regs,
		       0, 0, SIGSEGV) == NOTIFY_STOP)
		return;
	die_if_kernel("Oops", regs);
}
+
/* The fault happened with a privileged PC outside any known kernel
 * text region: dump the current stack and treat it as unhandled.
 */
static void bad_kernel_pc(struct pt_regs *regs)
{
	unsigned long *ksp;

	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	/* Grab our own stack pointer so show_stack() starts here. */
	__asm__("mov %%sp, %0" : "=r" (ksp));
	show_stack(current, ksp);
	unhandled_fault(regs->tpc, current, regs);
}
+
+/*
+ * We now make sure that mmap_sem is held in all paths that call 
+ * this. Additionally, to prevent kswapd from ripping ptes from
+ * under us, raise interrupts around the time that we look at the
+ * pte, kswapd will have to wait to get his smp ipi response from
+ * us. This saves us having to get page_table_lock.
+ */
/* Fetch the user instruction word at tpc by walking the page tables by
 * hand.  Returns 0 if the page is not mapped/present.  Interrupts are
 * disabled around the pte read (see comment block above) so kswapd's
 * IPI cannot complete while we look at the pte.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;
	unsigned long pstate;

	if (pgd_none(*pgdp))
		goto outret;
	pudp = pud_offset(pgdp, tpc);
	if (pud_none(*pudp))
		goto outret;
	pmdp = pmd_offset(pudp, tpc);
	if (pmd_none(*pmdp))
		goto outret;

	/* This disables preemption for us as well. */
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
				: : "r" (pstate), "i" (PSTATE_IE))
;
	ptep = pte_offset_map(pmdp, tpc);
	pte = *ptep;
	if (!pte_present(pte))
		goto out;

	/* Compute the physical address of the instruction. */
	pa  = (pte_val(pte) & _PAGE_PADDR);
	pa += (tpc & ~PAGE_MASK);

	/* Use phys bypass so we don't pollute dtlb/dcache. */
	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=r" (insn)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

out:
	pte_unmap(ptep);
	/* Restore the saved pstate (re-enables interrupts if they were on). */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
	return insn;
}
+
+extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
+
+static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+			     unsigned int insn, int fault_code)
+{
+	siginfo_t info;
+
+	info.si_code = code;
+	info.si_signo = sig;
+	info.si_errno = 0;
+	if (fault_code & FAULT_CODE_ITLB)
+		info.si_addr = (void __user *) regs->tpc;
+	else
+		info.si_addr = (void __user *)
+			compute_effective_address(regs, insn, 0);
+	info.si_trapno = 0;
+	force_sig_info(sig, &info, current);
+}
+
+extern int handle_ldf_stq(u32, struct pt_regs *);
+extern int handle_ld_nf(u32, struct pt_regs *);
+
+static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
+{
+	if (!insn) {
+		if (!regs->tpc || (regs->tpc & 0x3))
+			return 0;
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = *(unsigned int *) regs->tpc;
+		} else {
+			insn = get_user_insn(regs->tpc);
+		}
+	}
+	return insn;
+}
+
/* Handle a fault that the normal VM path could not satisfy: emulate
 * non-faulting loads / ldf-stq, consult the exception table for
 * privileged faults, or deliver SIGSEGV to user mode.  Falls through
 * to unhandled_fault() (oops) when nothing applies.
 */
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
			    unsigned int insn, unsigned long address)
{
	unsigned long g2;
	unsigned char asi = ASI_P;
 
	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If user insn could be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

	/* 0xc0800000 pattern: an FPU load/store with alternate-space
	 * addressing.  Bit 0x2000 selects the immediate-ASI form (ASI
	 * from %tstate) vs. the register form (ASI encoded in insn).
	 */
	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
		/* 0x82-style ASIs are the non-faulting ("no-fault") ones. */
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the next
				 * instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}
		
	g2 = regs->u_regs[UREG_G2];

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		unsigned long fixup;

		if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
		}
	
		/* Look in asm/asi.h: All _S asis have LS bit set */
		if ((asi & 0x1) &&
		    (fixup = search_extables_range(regs->tpc, &g2))) {
			/* Redirect execution to the fixup handler. */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault (address, current, regs);
}
+
/* Main sparc64 page fault entry point, called from the trap handlers.
 * The fault code and address have already been stashed in the thread
 * info by the low-level trap entry.  Handles VMA lookup, stack
 * expansion, permission checking and handle_mm_fault(), with the
 * kernel-side fallbacks in do_kernel_fault().
 */
asmlinkage void do_sparc64_fault(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code;
	unsigned long address;

	fault_code = get_thread_fault_code();

	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs,
		       fault_code, 0, SIGSEGV) == NOTIFY_STOP)
		return;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	/* A fault cannot be both an I-TLB and a D-TLB miss. */
	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs);
			return;
		}
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto intr_or_no_mm;

	/* 32-bit tasks only have a 32-bit address space; truncate. */
	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV))
			regs->tpc &= 0xffffffff;
		address &= 0xffffffff;
	}

	/* Avoid deadlock: if the kernel faulted while holding mmap_sem
	 * and has no fixup entry, go straight to the kernel fault path.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* Instruction decode: store-class opcodes, excluding
		 * the range that is not a store.  NOTE(review): bit
		 * patterns assumed to match the SPARC V9 opcode map —
		 * confirm against the architecture manual.
		 */
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x1780000) != 0x1680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took a ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		BUG_ON(address != regs->tpc);
		BUG_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	goto fault_done;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);

	goto fault_done;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (!(regs->tstate & TSTATE_PRIV))
		do_exit(SIGKILL);
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;

fault_done:
	/* These values are no longer needed, clear them. */
	set_thread_fault_code(0);
	current_thread_info()->fault_address = 0;
}
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
new file mode 100644
index 0000000..6b31f61
--- /dev/null
+++ b/arch/sparc64/mm/generic.c
@@ -0,0 +1,182 @@
+/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
+ * generic.c: Generic Sparc mm routines that are not dependent upon
+ *            MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+/* Remap IO memory, the same way as remap_pfn_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
/* Fill one PMD's worth of PTEs mapping the obio range starting at
 * @offset (in I/O space @space).  Where address, offset and remaining
 * size are suitably aligned, a single 4MB/512K/64K superpage PTE value
 * is replicated across the covered base-page PTE slots instead of
 * using individual 8K translations.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;
		
		entry = mk_pte_io(offset, prot, space);
		/* Superpage candidates must start on a 64K-aligned
		 * virtual address; then try 4MB, 512K, 64K in turn.
		 * NOTE(review): the offset alignment masks test one bit
		 * less than the size (e.g. 0x3ffffe for 4MB) because
		 * bit 0 was stripped above — confirm intent.
		 */
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		/* Write the (possibly replicated superpage) entry into
		 * every base-page slot it covers; slots must be empty.
		 */
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
+
/* Fill one PGD's worth of PMD entries for the obio mapping, allocating
 * PTE tables as needed and delegating each PMD span to
 * io_remap_pte_range().  Returns 0 on success or -ENOMEM.
 */
static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias offset so that (address + offset) yields the right I/O
	 * offset as address advances.
	 */
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		pte_unmap(pte);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
+
+static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
+	unsigned long offset, pgprot_t prot, int space)
+{
+	unsigned long end;
+
+	address &= ~PUD_MASK;
+	end = address + size;
+	if (end > PUD_SIZE)
+		end = PUD_SIZE;
+	offset -= address;
+	do {
+		pmd_t *pmd = pmd_alloc(mm, pud, address);
+		if (!pud)
+			return -ENOMEM;
+		io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
+		address = (address + PUD_SIZE) & PUD_MASK;
+		pud++;
+	} while (address < end);
+	return 0;
+}
+
/* Establish an obio (I/O space) mapping for [from, from+size) in the
 * vma's address space.  The caller's prot is overridden with pg_iobits
 * (I/O page protections).  Returns 0 on success or -ENOMEM.
 */
int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;

	/* I/O mappings always use the fixed I/O protection bits. */
	prot = __pgprot(pg_iobits);
	/* Bias offset so (offset + from) tracks the I/O offset below. */
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pud_t *pud = pud_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pud)
			break;
		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	/* Flush even on error: some entries may have been installed. */
	flush_tlb_range(vma, beg, end);
	spin_unlock(&mm->page_table_lock);

	return error;
}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+		unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+	int error = 0;
+	pgd_t * dir;
+	unsigned long beg = from;
+	unsigned long end = from + size;
+	struct mm_struct *mm = vma->vm_mm;
+	int space = GET_IOSPACE(pfn);
+	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+
+	prot = __pgprot(pg_iobits);
+	offset -= from;
+	dir = pgd_offset(mm, from);
+	flush_cache_range(vma, beg, end);
+
+	spin_lock(&mm->page_table_lock);
+	while (from < end) {
+		pud_t *pud = pud_alloc(current->mm, dir, from);
+		error = -ENOMEM;
+		if (!pud)
+			break;
+		error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+		if (error)
+			break;
+		from = (from + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	}
+	flush_tlb_range(vma, beg, end);
+	spin_unlock(&mm->page_table_lock);
+
+	return error;
+}
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
new file mode 100644
index 0000000..5a1f831
--- /dev/null
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -0,0 +1,310 @@
+/*
+ * SPARC64 Huge TLB page support.
+ *
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+/* Walk the page table for @addr, allocating the intermediate pmd and
+ * pte levels as needed.  Returns a mapped pte pointer, or NULL if an
+ * allocation fails.
+ */
+static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (!pgd)
+		return NULL;
+	pud = pud_offset(pgd, addr);
+	if (!pud)
+		return NULL;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return NULL;
+	return pte_alloc_map(mm, pmd, addr);
+}
+
+/* Read-only page table walk for @addr.  Returns a mapped pte pointer,
+ * or NULL when any intermediate level is absent.  Never allocates.
+ */
+static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (!pgd)
+		return NULL;
+	pud = pud_offset(pgd, addr);
+	if (!pud)
+		return NULL;
+	pmd = pmd_offset(pud, addr);
+	if (!pmd)
+		return NULL;
+	return pte_offset_map(pmd, addr);
+}
+
+/* Mark a pte value as a huge-page mapping by setting _PAGE_SZHUGE. */
+#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
+
+/* Install one huge page at @addr: fill all of the base-page ptes that
+ * back it with consecutive huge ptes and charge the mapping to the
+ * mm's rss counter.  @page_table points at the first pte of the run.
+ */
+static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+			 unsigned long addr,
+			 struct page *page, pte_t * page_table, int write_access)
+{
+	unsigned long i;
+	pte_t entry;
+
+	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+
+	if (write_access)
+		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
+						       vma->vm_page_prot)));
+	else
+		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+	entry = pte_mkyoung(entry);
+	mk_pte_huge(entry);
+
+	/* One pte per base page; each successive pte points PAGE_SIZE
+	 * further into the huge page's physical range.
+	 */
+	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+		set_pte_at(mm, addr, page_table, entry);
+		page_table++;
+		addr += PAGE_SIZE;
+
+		pte_val(entry) += PAGE_SIZE;
+	}
+}
+
+/*
+ * Verify that both the start address and the length of a prospective
+ * huge page region are multiples of the huge page size.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if ((addr | len) & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+/* Duplicate the huge page mappings of @vma from mm @src into mm @dst
+ * (fork path).  Pages are shared, not copied: each huge page gets one
+ * extra reference and its pte run is replicated.  Returns 0 or -ENOMEM.
+ */
+int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+			    struct vm_area_struct *vma)
+{
+	pte_t *src_pte, *dst_pte, entry;
+	struct page *ptepage;
+	unsigned long addr = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	int i;
+
+	while (addr < end) {
+		dst_pte = huge_pte_alloc(dst, addr);
+		if (!dst_pte)
+			goto nomem;
+		src_pte = huge_pte_offset(src, addr);
+		/* hugetlb regions are prefaulted, so the source pte
+		 * must exist and be populated.
+		 */
+		BUG_ON(!src_pte || pte_none(*src_pte));
+		entry = *src_pte;
+		ptepage = pte_page(entry);
+		get_page(ptepage);
+		/* Replicate the whole run of base-page ptes backing
+		 * this huge page, advancing the physical address by
+		 * PAGE_SIZE each step (see set_huge_pte()).
+		 */
+		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+			set_pte_at(dst, addr, dst_pte, entry);
+			pte_val(entry) += PAGE_SIZE;
+			dst_pte++;
+			addr += PAGE_SIZE;
+		}
+		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+	}
+	return 0;
+
+nomem:
+	return -ENOMEM;
+}
+
+/* get_user_pages() back-end for hugetlb vmas: gather up to *length
+ * pages starting at *position into @pages/@vmas (either may be NULL).
+ * Advances *position and decrements *length as pages are gathered.
+ * Returns the updated array index @i.
+ */
+int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			struct page **pages, struct vm_area_struct **vmas,
+			unsigned long *position, int *length, int i)
+{
+	unsigned long vaddr = *position;
+	int remainder = *length;
+
+	WARN_ON(!is_vm_hugetlb_page(vma));
+
+	while (vaddr < vma->vm_end && remainder) {
+		if (pages) {
+			pte_t *pte;
+			struct page *page;
+
+			pte = huge_pte_offset(mm, vaddr);
+
+			/* hugetlb should be locked, and hence, prefaulted */
+			BUG_ON(!pte || pte_none(*pte));
+
+			page = pte_page(*pte);
+
+			WARN_ON(!PageCompound(page));
+
+			get_page(page);
+			pages[i] = page;
+		}
+
+		if (vmas)
+			vmas[i] = vma;
+
+		/* Step one base page at a time; callers see base-page
+		 * granularity even inside a huge mapping.
+		 */
+		vaddr += PAGE_SIZE;
+		--remainder;
+		++i;
+	}
+
+	*length = remainder;
+	*position = vaddr;
+
+	return i;
+}
+
+/* sparc64 reaches huge pages through the normal page tables (see
+ * huge_pte_offset()), so there is no special address-based lookup.
+ */
+struct page *follow_huge_addr(struct mm_struct *mm,
+			      unsigned long address, int write)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+/* Huge pages are not mapped at the pmd level on sparc64, so no pmd
+ * ever qualifies as "huge".
+ */
+int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+/* Never called on sparc64 since pmd_huge() always returns 0. */
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+			     pmd_t *pmd, int write)
+{
+	return NULL;
+}
+
+/* Tear down the huge page mappings in [start, end) of @vma: drop each
+ * page's reference, clear the backing pte run, adjust rss, and flush
+ * the TLB for the whole range.  Both bounds must be huge-page aligned.
+ */
+void unmap_hugepage_range(struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte;
+	struct page *page;
+	int i;
+
+	BUG_ON(start & (HPAGE_SIZE - 1));
+	BUG_ON(end & (HPAGE_SIZE - 1));
+
+	for (address = start; address < end; address += HPAGE_SIZE) {
+		pte = huge_pte_offset(mm, address);
+		BUG_ON(!pte);
+		if (pte_none(*pte))
+			continue;
+		page = pte_page(*pte);
+		put_page(page);
+		/* Clear every base-page pte backing this huge page. */
+		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+			pte_clear(mm, address+(i*PAGE_SIZE), pte);
+			pte++;
+		}
+	}
+	/* NOTE(review): rss is decremented for the full range even if
+	 * some huge pages above were pte_none and skipped -- assumes a
+	 * fully prefaulted range; confirm against callers.
+	 */
+	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
+	flush_tlb_range(vma, start, end);
+}
+
+/* Cross-call callback: if the receiving cpu is currently running the
+ * address space passed in @__data, reload its secondary context
+ * register so the new context value (page size bits) takes effect.
+ */
+static void context_reload(void *__data)
+{
+	struct mm_struct *mm = __data;
+
+	if (mm == current->mm)
+		load_secondary_context(mm);
+}
+
+/* Prefault every huge page of @vma from @mapping's page cache,
+ * allocating and inserting pages that are not yet present.  Also
+ * configures the context register for huge pages on UltraSPARC-III+.
+ * Returns 0 or -ENOMEM.  Called with mmap_sem held.
+ */
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret = 0;
+
+	/* On UltraSPARC-III+ and later, configure the second half of
+	 * the Data-TLB for huge pages.
+	 */
+	if (tlb_type == cheetah_plus) {
+		unsigned long ctx;
+
+		spin_lock(&ctx_alloc_lock);
+		ctx = mm->context.sparc64_ctx_val;
+		ctx &= ~CTX_PGSZ_MASK;
+		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+		if (ctx != mm->context.sparc64_ctx_val) {
+			/* When changing the page size fields, we
+			 * must perform a context flush so that no
+			 * stale entries match.  This flush must
+			 * occur with the original context register
+			 * settings.
+			 */
+			do_flush_tlb_mm(mm);
+
+			/* Reload the context register of all processors
+			 * also executing in this address space.
+			 */
+			mm->context.sparc64_ctx_val = ctx;
+			on_each_cpu(context_reload, mm, 0, 0);
+		}
+		spin_unlock(&ctx_alloc_lock);
+	}
+
+	BUG_ON(vma->vm_start & ~HPAGE_MASK);
+	BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
+	spin_lock(&mm->page_table_lock);
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		unsigned long idx;
+		pte_t *pte = huge_pte_alloc(mm, addr);
+		struct page *page;
+
+		if (!pte) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		/* Already faulted in (e.g. shared mapping), skip. */
+		if (!pte_none(*pte))
+			continue;
+
+		/* Page cache index of this huge page within the file. */
+		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		page = find_get_page(mapping, idx);
+		if (!page) {
+			/* charge the fs quota first */
+			if (hugetlb_get_quota(mapping)) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			page = alloc_huge_page();
+			if (!page) {
+				hugetlb_put_quota(mapping);
+				ret = -ENOMEM;
+				goto out;
+			}
+			/* GFP_ATOMIC: page_table_lock is held, so we
+			 * must not sleep here.
+			 */
+			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+			if (! ret) {
+				unlock_page(page);
+			} else {
+				hugetlb_put_quota(mapping);
+				free_huge_page(page);
+				goto out;
+			}
+		}
+		set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
+	}
+out:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
new file mode 100644
index 0000000..89022cc
--- /dev/null
+++ b/arch/sparc64/mm/init.c
@@ -0,0 +1,1769 @@
+/*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
+ *  arch/sparc64/mm/init.c
+ *
+ *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+ 
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/head.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/iommu.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/dma.h>
+#include <asm/starfire.h>
+#include <asm/tlb.h>
+#include <asm/spitfire.h>
+#include <asm/sections.h>
+
+extern void device_scan(void);
+
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+unsigned long *sparc64_valid_addr_bitmap;
+
+/* Ugly, but necessary... -DaveM */
+unsigned long phys_base;
+unsigned long kern_base;
+unsigned long kern_size;
+unsigned long pfn_base;
+
+/* This is even uglier. We have a problem where the kernel may not be
+ * located at phys_base. However, initial __alloc_bootmem() calls need to
+ * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
+ * those page mappings wont work. Things are ok after inherit_prom_mappings
+ * is called though. Dave says he'll clean this up some other time.
+ * -- BenC
+ */
+static unsigned long bootmap_base;
+
+/* get_new_mmu_context() uses "cache + 1".  */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
+unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
+
+/* References to special section boundaries */
+extern char  _start[], _end[];
+
+/* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
+extern unsigned int sparc_ramdisk_image;
+extern unsigned int sparc_ramdisk_size;
+
+struct page *mem_map_zero;
+
+int bigkernel = 0;
+
+/* XXX Tune this... */
+#define PGT_CACHE_LOW	25
+#define PGT_CACHE_HIGH	50
+
+/* Trim the per-cpu page table quicklists: once the cache grows past
+ * PGT_CACHE_HIGH entries, free entries back to the page allocator
+ * until it drops to PGT_CACHE_LOW.  Preemption is disabled because
+ * the quicklists are per-cpu.
+ */
+void check_pgt_cache(void)
+{
+	preempt_disable();
+	if (pgtable_cache_size > PGT_CACHE_HIGH) {
+		do {
+			if (pgd_quicklist)
+				free_pgd_slow(get_pgd_fast());
+			if (pte_quicklist[0])
+				free_pte_slow(pte_alloc_one_fast(NULL, 0));
+			if (pte_quicklist[1])
+				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+		} while (pgtable_cache_size > PGT_CACHE_LOW);
+	}
+	preempt_enable();
+}
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+atomic_t dcpage_flushes = ATOMIC_INIT(0);
+#ifdef CONFIG_SMP
+atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+#endif
+#endif
+
+/* Actually flush @page out of the local D-cache (and, on Spitfire
+ * with a mapped page, the I-cache).  DCACHE_ALIASING_POSSIBLE selects
+ * whether a full D-cache page flush is needed at this page size.
+ */
+__inline__ void flush_dcache_page_impl(struct page *page)
+{
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	__flush_dcache_page(page_address(page),
+			    ((tlb_type == spitfire) &&
+			     page_mapping(page) != NULL));
+#else
+	if (page_mapping(page) != NULL &&
+	    tlb_type == spitfire)
+		__flush_icache_page(__pa(page_address(page)));
+#endif
+}
+
+#define PG_dcache_dirty		PG_arch_1
+
+#define dcache_dirty_cpu(page) \
+	(((page)->flags >> 24) & (NR_CPUS - 1UL))
+
+/* Atomically record in page->flags that @page is dcache-dirty on
+ * @this_cpu: a casx loop clears the old cpu number (bits 24+) and ORs
+ * in the new cpu number plus the PG_dcache_dirty bit.
+ */
+static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
+{
+	unsigned long mask = this_cpu;
+	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
+	mask = (mask << 24) | (1UL << PG_dcache_dirty);
+	__asm__ __volatile__("1:\n\t"
+			     "ldx	[%2], %%g7\n\t"
+			     "and	%%g7, %1, %%g1\n\t"
+			     "or	%%g1, %0, %%g1\n\t"
+			     "casx	[%2], %%g7, %%g1\n\t"
+			     "cmp	%%g7, %%g1\n\t"
+			     "bne,pn	%%xcc, 1b\n\t"
+			     " membar	#StoreLoad | #StoreStore"
+			     : /* no outputs */
+			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
+			     : "g1", "g7");
+}
+
+/* Atomically clear the PG_dcache_dirty bit in page->flags, but only
+ * if the recorded dirty cpu number still equals @cpu; otherwise leave
+ * the flags untouched (another cpu has since re-dirtied the page).
+ */
+static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+{
+	unsigned long mask = (1UL << PG_dcache_dirty);
+
+	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
+			     "1:\n\t"
+			     "ldx	[%2], %%g7\n\t"
+			     "srlx	%%g7, 24, %%g1\n\t"
+			     "and	%%g1, %3, %%g1\n\t"
+			     "cmp	%%g1, %0\n\t"
+			     "bne,pn	%%icc, 2f\n\t"
+			     " andn	%%g7, %1, %%g1\n\t"
+			     "casx	[%2], %%g7, %%g1\n\t"
+			     "cmp	%%g7, %%g1\n\t"
+			     "bne,pn	%%xcc, 1b\n\t"
+			     " membar	#StoreLoad | #StoreStore\n"
+			     "2:"
+			     : /* no outputs */
+			     : "r" (cpu), "r" (mask), "r" (&page->flags),
+			       "i" (NR_CPUS - 1UL)
+			     : "g1", "g7");
+}
+
+extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
+
+/* Called after a pte is installed for @address.  Performs any D-cache
+ * flush that was deferred via PG_dcache_dirty (see flush_dcache_page)
+ * and preloads the TLB/TSB via __update_mmu_cache when a fault code
+ * is pending on this thread.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	struct page *page;
+	unsigned long pfn;
+	unsigned long pg_flags;
+
+	pfn = pte_pfn(pte);
+	if (pfn_valid(pfn) &&
+	    (page = pfn_to_page(pfn), page_mapping(page)) &&
+	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
+		/* Cpu that marked the page dirty, stored in bits 24+
+		 * of page->flags (see set_dcache_dirty()).
+		 */
+		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+		int this_cpu = get_cpu();
+
+		/* This is just to optimize away some function calls
+		 * in the SMP case.
+		 */
+		if (cpu == this_cpu)
+			flush_dcache_page_impl(page);
+		else
+			smp_flush_dcache_page_impl(page, cpu);
+
+		clear_dcache_dirty_cpu(page, cpu);
+
+		put_cpu();
+	}
+
+	if (get_thread_fault_code())
+		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
+				   address, pte, get_thread_fault_code());
+}
+
+/* Lazy D-cache flush: for page-cache pages with no user mappings the
+ * flush is deferred by tagging the page dcache-dirty for this cpu
+ * (it will be flushed from update_mmu_cache() when actually mapped);
+ * otherwise the page is flushed immediately.
+ */
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	int dirty = test_bit(PG_dcache_dirty, &page->flags);
+	int dirty_cpu = dcache_dirty_cpu(page);
+	int this_cpu = get_cpu();
+
+	if (mapping && !mapping_mapped(mapping)) {
+		if (dirty) {
+			/* Already deferred on this cpu, nothing to do. */
+			if (dirty_cpu == this_cpu)
+				goto out;
+			smp_flush_dcache_page_impl(page, dirty_cpu);
+		}
+		set_dcache_dirty(page, this_cpu);
+	} else {
+		/* We could delay the flush for the !page_mapping
+		 * case too.  But that case is for exec env/arg
+		 * pages and those are %99 certainly going to get
+		 * faulted into the tlb (and thus flushed) anyways.
+		 */
+		flush_dcache_page_impl(page);
+	}
+
+out:
+	put_cpu();
+}
+
+/* Flush the I-cache for [start, end).  Only Spitfire needs this;
+ * Cheetah and later have a coherent I-cache.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long kaddr;
+
+	if (tlb_type != spitfire)
+		return;
+
+	for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
+		__flush_icache_page(__get_phys(kaddr));
+}
+
+/* mem_map[] does not start at pfn 0 on sparc64; bias by pfn_base. */
+unsigned long page_to_pfn(struct page *page)
+{
+	return pfn_base + (unsigned long) (page - mem_map);
+}
+
+/* Inverse of page_to_pfn(): index mem_map[] relative to pfn_base. */
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return &mem_map[pfn - pfn_base];
+}
+
+/* Dump a summary of memory usage to the console (SysRq-m etc.). */
+void show_mem(void)
+{
+	printk("Mem-info:\n");
+	show_free_areas();
+	printk("Free swap:       %6ldkB\n",
+	       nr_swap_pages << (PAGE_SHIFT-10));
+	printk("%ld pages of RAM\n", num_physpages);
+	printk("%d free pages\n", nr_free_pages());
+	printk("%d pages in page table cache\n",pgtable_cache_size);
+}
+
+/* Emit MMU type (and optional D-cache flush counters) for
+ * /proc/cpuinfo.
+ */
+void mmu_info(struct seq_file *m)
+{
+	if (tlb_type == cheetah)
+		seq_printf(m, "MMU Type\t: Cheetah\n");
+	else if (tlb_type == cheetah_plus)
+		seq_printf(m, "MMU Type\t: Cheetah+\n");
+	else if (tlb_type == spitfire)
+		seq_printf(m, "MMU Type\t: Spitfire\n");
+	else
+		seq_printf(m, "MMU Type\t: ???\n");
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	seq_printf(m, "DCPageFlushes\t: %d\n",
+		   atomic_read(&dcpage_flushes));
+#ifdef CONFIG_SMP
+	seq_printf(m, "DCPageFlushesXC\t: %d\n",
+		   atomic_read(&dcpage_flushes_xcall));
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_DEBUG_DCFLUSH */
+}
+
+/* One entry of OBP's "translations" property: a firmware-established
+ * virtual mapping and the TTE data backing it.
+ */
+struct linux_prom_translation {
+	unsigned long virt;	/* virtual start address */
+	unsigned long size;	/* size of the mapping in bytes */
+	unsigned long data;	/* TTE data (phys addr + attribute bits) */
+};
+
+extern unsigned long prom_boot_page;
+extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
+extern int prom_get_mmu_ihandle(void);
+extern void register_prom_callbacks(void);
+
+/* Exported for SMP bootup purposes. */
+unsigned long kern_locked_tte_data;
+
+/* Fatal: report a failed boot-time page table allocation of @type
+ * and halt in the PROM.
+ */
+void __init early_pgtable_allocfail(char *type)
+{
+	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
+	prom_halt();
+}
+
+#define BASE_PAGE_SIZE 8192
+static pmd_t *prompmd;
+
+/*
+ * Translate PROM's mapping we capture at boot time into physical address.
+ * The second parameter is only set from prom_callback() invocations.
+ */
+unsigned long prom_virt_to_phys(unsigned long promva, int *error)
+{
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pmdp = prompmd + ((promva >> 23) & 0x7ff);
+	if (pmd_none(*pmdp)) {
+		if (error)
+			*error = 1;
+		return 0;
+	}
+
+	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+	if (!pte_present(*ptep)) {
+		if (error)
+			*error = 1;
+		return 0;
+	}
+
+	/* Callers passing an error pointer want the raw pte value. */
+	if (error) {
+		*error = 0;
+		return pte_val(*ptep);
+	}
+
+	return (pte_val(*ptep) & _PAGE_PADDR) +
+	       (promva & (BASE_PAGE_SIZE - 1));
+}
+
+/* Take over the firmware's view of memory: capture OBP's
+ * "translations" property into a private pmd/pte table (prompmd),
+ * patch the trap handlers to use it, remap the kernel via OBP to its
+ * final location, and unmap firmware translations that would overlap
+ * the kernel image.  Runs once at boot, console output via prom_printf.
+ */
+static void inherit_prom_mappings(void)
+{
+	struct linux_prom_translation *trans;
+	unsigned long phys_page, tte_vaddr, tte_data;
+	void (*remap_func)(unsigned long, unsigned long, int);
+	pmd_t *pmdp;
+	pte_t *ptep;
+	int node, n, i, tsz;
+	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+
+	node = prom_finddevice("/virtual-memory");
+	n = prom_getproplen(node, "translations");
+	if (n == 0 || n == -1) {
+		prom_printf("Couldn't get translation property\n");
+		prom_halt();
+	}
+	/* Leave slack, then round the buffer size up to a power of two. */
+	n += 5 * sizeof(struct linux_prom_translation);
+	for (tsz = 1; tsz < n; tsz <<= 1)
+		/* empty */;
+	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
+	if (trans == NULL) {
+		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
+		prom_halt();
+	}
+	memset(trans, 0, tsz);
+
+	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
+		prom_printf("Couldn't get translation property\n");
+		prom_halt();
+	}
+	n = n / sizeof(*trans);
+
+	/*
+	 * The obp translations are saved based on 8k pagesize, since obp can
+	 * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
+	 * ie obp range, are handled in entry.S and do not use the vpte scheme
+	 * (see rant in inherit_locked_prom_mappings()).
+	 */
+#define OBP_PMD_SIZE 2048
+	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
+	if (prompmd == NULL)
+		early_pgtable_allocfail("pmd");
+	memset(prompmd, 0, OBP_PMD_SIZE);
+	for (i = 0; i < n; i++) {
+		unsigned long vaddr;
+
+		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
+			for (vaddr = trans[i].virt;
+			     ((vaddr < trans[i].virt + trans[i].size) && 
+			     (vaddr < HI_OBP_ADDRESS));
+			     vaddr += BASE_PAGE_SIZE) {
+				unsigned long val;
+
+				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
+				if (pmd_none(*pmdp)) {
+					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
+							       BASE_PAGE_SIZE,
+							       bootmap_base);
+					if (ptep == NULL)
+						early_pgtable_allocfail("pte");
+					memset(ptep, 0, BASE_PAGE_SIZE);
+					pmd_set(pmdp, ptep);
+				}
+				ptep = (pte_t *)__pmd_page(*pmdp) +
+						((vaddr >> 13) & 0x3ff);
+
+				val = trans[i].data;
+
+				/* Clear diag TTE bits. */
+				if (tlb_type == spitfire)
+					val &= ~0x0003fe0000000000UL;
+
+				set_pte_at(&init_mm, vaddr,
+					   ptep, __pte(val | _PAGE_MODIFIED));
+				trans[i].data += BASE_PAGE_SIZE;
+			}
+		}
+	}
+	/* Patch the trap table's OBP fault handlers to point at prompmd. */
+	phys_page = __pa(prompmd);
+	obp_iaddr_patch[0] |= (phys_page >> 10);
+	obp_iaddr_patch[1] |= (phys_page & 0x3ff);
+	flushi((long)&obp_iaddr_patch[0]);
+	obp_daddr_patch[0] |= (phys_page >> 10);
+	obp_daddr_patch[1] |= (phys_page & 0x3ff);
+	flushi((long)&obp_daddr_patch[0]);
+
+	/* Now fixup OBP's idea about where we really are mapped. */
+	prom_printf("Remapping the kernel... ");
+
+	/* Spitfire Errata #32 workaround */
+	/* NOTE: Using plain zero for the context value is
+	 *       correct here, we are not using the Linux trap
+	 *       tables yet so we should not use the special
+	 *       UltraSPARC-III+ page size encodings yet.
+	 */
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "flush	%%g6"
+			     : /* No outputs */
+			     : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+	switch (tlb_type) {
+	default:
+	case spitfire:
+		phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+		break;
+
+	case cheetah:
+	case cheetah_plus:
+		phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
+		break;
+	};
+
+	/* Compute the physical address of prom_remap's trampoline page. */
+	phys_page &= _PAGE_PADDR;
+	phys_page += ((unsigned long)&prom_boot_page -
+		      (unsigned long)KERNBASE);
+
+	if (tlb_type == spitfire) {
+		/* Lock this into i/d tlb entry 59 */
+		__asm__ __volatile__(
+			"stxa	%%g0, [%2] %3\n\t"
+			"stxa	%0, [%1] %4\n\t"
+			"membar	#Sync\n\t"
+			"flush	%%g6\n\t"
+			"stxa	%%g0, [%2] %5\n\t"
+			"stxa	%0, [%1] %6\n\t"
+			"membar	#Sync\n\t"
+			"flush	%%g6"
+			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+			"r" (59 << 3), "r" (TLB_TAG_ACCESS),
+			"i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+			"i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+			: "memory");
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		/* Lock this into i/d tlb-0 entry 11 */
+		__asm__ __volatile__(
+			"stxa	%%g0, [%2] %3\n\t"
+			"stxa	%0, [%1] %4\n\t"
+			"membar	#Sync\n\t"
+			"flush	%%g6\n\t"
+			"stxa	%%g0, [%2] %5\n\t"
+			"stxa	%0, [%1] %6\n\t"
+			"membar	#Sync\n\t"
+			"flush	%%g6"
+			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+			"r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
+			"i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+			"i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+			: "memory");
+	} else {
+		/* Implement me :-) */
+		BUG();
+	}
+
+	tte_vaddr = (unsigned long) KERNBASE;
+
+	/* Spitfire Errata #32 workaround */
+	/* NOTE: Using plain zero for the context value is
+	 *       correct here, we are not using the Linux trap
+	 *       tables yet so we should not use the special
+	 *       UltraSPARC-III+ page size encodings yet.
+	 */
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "flush	%%g6"
+			     : /* No outputs */
+			     : "r" (0),
+			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+	if (tlb_type == spitfire)
+		tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+	else
+		tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+
+	kern_locked_tte_data = tte_data;
+
+	/* prom_remap is called through the locked trampoline mapping
+	 * set up above, hence the address arithmetic.
+	 */
+	remap_func = (void *)  ((unsigned long) &prom_remap -
+				(unsigned long) &prom_boot_page);
+
+
+	/* Spitfire Errata #32 workaround */
+	/* NOTE: Using plain zero for the context value is
+	 *       correct here, we are not using the Linux trap
+	 *       tables yet so we should not use the special
+	 *       UltraSPARC-III+ page size encodings yet.
+	 */
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "flush	%%g6"
+			     : /* No outputs */
+			     : "r" (0),
+			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+	remap_func((tlb_type == spitfire ?
+		    (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
+		    (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
+		   (unsigned long) KERNBASE,
+		   prom_get_mmu_ihandle());
+
+	if (bigkernel)
+		remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
+			(unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
+
+	/* Flush out that temporary mapping. */
+	spitfire_flush_dtlb_nucleus_page(0x0);
+	spitfire_flush_itlb_nucleus_page(0x0);
+
+	/* Now lock us back into the TLBs via OBP. */
+	prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+	prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+	if (bigkernel) {
+		prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 
+								tte_vaddr + 0x400000);
+		prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 
+								tte_vaddr + 0x400000);
+	}
+
+	/* Re-read translations property. */
+	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
+		prom_printf("Couldn't get translation property\n");
+		prom_halt();
+	}
+	n = n / sizeof(*trans);
+
+	/* Unmap any firmware translation overlapping the kernel image. */
+	for (i = 0; i < n; i++) {
+		unsigned long vaddr = trans[i].virt;
+		unsigned long size = trans[i].size;
+
+		if (vaddr < 0xf0000000UL) {
+			unsigned long avoid_start = (unsigned long) KERNBASE;
+			unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
+
+			if (bigkernel)
+				avoid_end += (4 * 1024 * 1024);
+			if (vaddr < avoid_start) {
+				unsigned long top = vaddr + size;
+
+				if (top > avoid_start)
+					top = avoid_start;
+				prom_unmap(top - vaddr, vaddr);
+			}
+			if ((vaddr + size) > avoid_end) {
+				unsigned long bottom = vaddr;
+
+				if (bottom < avoid_end)
+					bottom = avoid_end;
+				prom_unmap((vaddr + size) - bottom, bottom);
+			}
+		}
+	}
+
+	prom_printf("done.\n");
+
+	register_prom_callbacks();
+}
+
+/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
+ * upwards as reserved for use by the firmware (I wonder if this
+ * will be the same on Cheetah...).  We use this virtual address
+ * range for the VPTE table mappings of the nucleus so we need
+ * to zap them when we enter the PROM.  -DaveM
+ */
+/* Evict any DTLB entries covering the PROM-reserved top of the
+ * address space (where the nucleus VPTE mappings live) before
+ * handing control to the firmware.  Probes every DTLB entry and
+ * zaps matching ones; only the DTLB can hold VPTE entries.
+ */
+static void __flush_nucleus_vptes(void)
+{
+	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
+	int i;
+
+	/* Only DTLB must be checked for VPTE entries. */
+	if (tlb_type == spitfire) {
+		for (i = 0; i < 63; i++) {
+			unsigned long tag;
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no cheetah+
+			 *       page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			tag = spitfire_get_dtlb_tag(i);
+			if (((tag & ~(PAGE_MASK)) == 0) &&
+			    ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		/* Check the large-entry DTLBs (2, and 3 on cheetah+). */
+		for (i = 0; i < 512; i++) {
+			unsigned long tag = cheetah_get_dtlb_tag(i, 2);
+
+			if ((tag & ~PAGE_MASK) == 0 &&
+			    (tag & PAGE_MASK) >= prom_reserved_base) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				cheetah_put_dtlb_data(i, 0x0UL, 2);
+			}
+
+			if (tlb_type != cheetah_plus)
+				continue;
+
+			tag = cheetah_get_dtlb_tag(i, 3);
+
+			if ((tag & ~PAGE_MASK) == 0 &&
+			    (tag & PAGE_MASK) >= prom_reserved_base) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				cheetah_put_dtlb_data(i, 0x0UL, 3);
+			}
+		}
+	} else {
+		/* Implement me :-) */
+		BUG();
+	}
+}
+
+static int prom_ditlb_set;
+struct prom_tlb_entry {
+	int		tlb_ent;
+	unsigned long	tlb_tag;
+	unsigned long	tlb_data;
+};
+struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
+
+/* Switch the MMU between the kernel's TLB state and the firmware's.
+ * enter != 0: flush nucleus VPTEs and reinstall the PROM's saved
+ * locked TLB entries.  enter == 0: restore the user address-space
+ * limit and clear those entries again.  Runs with interrupts
+ * disabled for the duration of the TLB surgery.
+ */
+void prom_world(int enter)
+{
+	unsigned long pstate;
+	int i;
+
+	if (!enter)
+		set_fs((mm_segment_t) { get_thread_current_ds() });
+
+	/* Nothing saved yet (inherit_locked_prom_mappings not run). */
+	if (!prom_ditlb_set)
+		return;
+
+	/* Make sure the following runs atomically. */
+	__asm__ __volatile__("flushw\n\t"
+			     "rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	if (enter) {
+		/* Kick out nucleus VPTEs. */
+		__flush_nucleus_vptes();
+
+		/* Install PROM world. */
+		for (i = 0; i < 16; i++) {
+			if (prom_dtlb[i].tlb_ent != -1) {
+				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+						     "membar #Sync"
+					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
+					"i" (ASI_DMMU));
+				if (tlb_type == spitfire)
+					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+							       prom_dtlb[i].tlb_data);
+				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
+							       prom_dtlb[i].tlb_data);
+			}
+			if (prom_itlb[i].tlb_ent != -1) {
+				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+						     "membar #Sync"
+						     : : "r" (prom_itlb[i].tlb_tag),
+						     "r" (TLB_TAG_ACCESS),
+						     "i" (ASI_IMMU));
+				if (tlb_type == spitfire)
+					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+							       prom_itlb[i].tlb_data);
+				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
+							       prom_itlb[i].tlb_data);
+			}
+		}
+	} else {
+		/* Clear the PROM's entries out again. */
+		for (i = 0; i < 16; i++) {
+			if (prom_dtlb[i].tlb_ent != -1) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				if (tlb_type == spitfire)
+					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+				else
+					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+			}
+			if (prom_itlb[i].tlb_ent != -1) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : : "r" (TLB_TAG_ACCESS),
+						     "i" (ASI_IMMU));
+				if (tlb_type == spitfire)
+					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+				else
+					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+			}
+		}
+	}
+	/* Restore the saved pstate (re-enables interrupts). */
+	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
+			     : : "r" (pstate));
+}
+
+/* Scan the TLBs for locked entries the firmware left behind, remove
+ * them, and (when @save_p) record up to 16 DTLB and 16 ITLB entries
+ * in prom_dtlb[]/prom_itlb[] so prom_world() can reinstall them.
+ */
+void inherit_locked_prom_mappings(int save_p)
+{
+	int i;
+	int dtlb_seen = 0;
+	int itlb_seen = 0;
+
+	/* Fucking losing PROM has more mappings in the TLB, but
+	 * it (conveniently) fails to mention any of these in the
+	 * translations property.  The only ones that matter are
+	 * the locked PROM tlb entries, so we impose the following
+	 * irrecovable rule on the PROM, it is allowed 8 locked
+	 * entries in the ITLB and 8 in the DTLB.
+	 *
+	 * Supposedly the upper 16GB of the address space is
+	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
+	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
+	 * used between the client program and the firmware on sun5
+	 * systems to coordinate mmu mappings is also COMPLETELY
+	 * UNDOCUMENTED!!!!!! Thanks S(t)un!
+	 */
+	if (save_p) {
+		for (i = 0; i < 16; i++) {
+			prom_itlb[i].tlb_ent = -1;
+			prom_dtlb[i].tlb_ent = -1;
+		}
+	}
+	if (tlb_type == spitfire) {
+		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
+		for (i = 0; i < high; i++) {
+			unsigned long data;
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no cheetah+
+			 *       page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			data = spitfire_get_dtlb_data(i);
+			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+				unsigned long tag;
+
+				/* Spitfire Errata #32 workaround */
+				/* NOTE: Always runs on spitfire, so no
+				 *       cheetah+ page size encodings.
+				 */
+				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+						     "flush	%%g6"
+						     : /* No outputs */
+						     : "r" (0),
+						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+				tag = spitfire_get_dtlb_tag(i);
+				if (save_p) {
+					prom_dtlb[dtlb_seen].tlb_ent = i;
+					prom_dtlb[dtlb_seen].tlb_tag = tag;
+					prom_dtlb[dtlb_seen].tlb_data = data;
+				}
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+
+				dtlb_seen++;
+				if (dtlb_seen > 15)
+					break;
+			}
+		}
+
+		/* Same dance for the ITLB. */
+		for (i = 0; i < high; i++) {
+			unsigned long data;
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			data = spitfire_get_itlb_data(i);
+			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+				unsigned long tag;
+
+				/* Spitfire Errata #32 workaround */
+				/* NOTE: Always runs on spitfire, so no
+				 *       cheetah+ page size encodings.
+				 */
+				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+						     "flush	%%g6"
+						     : /* No outputs */
+						     : "r" (0),
+						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+				tag = spitfire_get_itlb_tag(i);
+				if (save_p) {
+					prom_itlb[itlb_seen].tlb_ent = i;
+					prom_itlb[itlb_seen].tlb_tag = tag;
+					prom_itlb[itlb_seen].tlb_data = data;
+				}
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				spitfire_put_itlb_data(i, 0x0UL);
+
+				itlb_seen++;
+				if (itlb_seen > 15)
+					break;
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+
+		for (i = 0; i < high; i++) {
+			unsigned long data;
+
+			data = cheetah_get_ldtlb_data(i);
+			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+				unsigned long tag;
+
+				tag = cheetah_get_ldtlb_tag(i);
+				if (save_p) {
+					prom_dtlb[dtlb_seen].tlb_ent = i;
+					prom_dtlb[dtlb_seen].tlb_tag = tag;
+					prom_dtlb[dtlb_seen].tlb_data = data;
+				}
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				cheetah_put_ldtlb_data(i, 0x0UL);
+
+				dtlb_seen++;
+				if (dtlb_seen > 15)
+					break;
+			}
+		}
+
+		/* Same dance for the ITLB. */
+		for (i = 0; i < high; i++) {
+			unsigned long data;
+
+			data = cheetah_get_litlb_data(i);
+			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+				unsigned long tag;
+
+				tag = cheetah_get_litlb_tag(i);
+				if (save_p) {
+					prom_itlb[itlb_seen].tlb_ent = i;
+					prom_itlb[itlb_seen].tlb_tag = tag;
+					prom_itlb[itlb_seen].tlb_data = data;
+				}
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				cheetah_put_litlb_data(i, 0x0UL);
+
+				itlb_seen++;
+				if (itlb_seen > 15)
+					break;
+			}
+		}
+	} else {
+		/* Implement me :-) */
+		BUG();
+	}
+	if (save_p)
+		prom_ditlb_set = 1;
+}
+
/* Give PROM back his world, done during reboots...
 *
 * Re-install the locked D-TLB/I-TLB entries that were saved into
 * prom_dtlb[]/prom_itlb[] (slots with tlb_ent == -1 are unused).
 * Each entry is restored by writing its tag to TLB_TAG_ACCESS and
 * then its data through the CPU-specific locked-TLB accessor.
 */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			/* Select the D-TLB entry by writing its tag. */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				"i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						      prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			/* Same sequence for the I-TLB side. */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}
+
+#ifdef DCACHE_ALIASING_POSSIBLE
/* Flush the D-cache for the virtual range [start, end).
 *
 * On spitfire this invalidates by writing zero D-cache tags, capped
 * at 512 lines (the scan stops after n reaches 512).  On other chips
 * (cheetah-class) the range is converted to physical and each 32-byte
 * line is invalidated via ASI_DCACHE_INVALIDATE.
 */
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			/* 0x3fe0 masks the address down to the tag index. */
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
/* If not locked, zap it.
 *
 * Flush every TLB entry that does not have the lock bit (_PAGE_L)
 * set.  Runs with interrupts disabled (PSTATE_IE cleared) so no TLB
 * activity can race with the scan; register windows are spilled
 * first with "flushw".
 */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	/* Save %pstate and clear PSTATE_IE. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		/* Spitfire: walk all 64 entries of each TLB by hand. */
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				/* Clear the tag, then zap the data word. */
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah has dedicated flush-all primitives. */
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	/* Restore the saved %pstate (re-enables interrupts if they were on). */
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
+
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Allocation scans mmu_context_bmap for a free context number,
 * starting just past the last one handed out (tlb_context_cache).
 * If the bitmap is exhausted, the version field is bumped and the
 * bitmap reset, invalidating all outstanding contexts at once.
 * The mm's page-size bits (CTX_PGSZ_MASK) are preserved across
 * the reassignment.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		/* Wrapped: retry from bit 1 (bit 0 is the nucleus ctx). */
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			/* Bitmap completely full: start a new version. */
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			/* Version field wrapped to zero: restart at the
			 * first valid version (see comment above).
			 */
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	/* Mark the chosen context number used and stamp the version. */
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}
+
#ifndef CONFIG_SMP
/* On UP a single global instance backs the page-table quicklists
 * (on SMP this is per-cpu, defined elsewhere).
 */
struct pgtable_cache_struct pgt_quicklists;
#endif
+
+/* OK, we have to color these pages. The page tables are accessed
+ * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
+ * code, as well as by PAGE_OFFSET range direct-mapped addresses by 
+ * other parts of the kernel. By coloring, we make sure that the tlbmiss 
+ * fast handlers do not get data from old/garbage dcache lines that 
+ * correspond to an old/stale virtual address (user/kernel) that 
+ * previously mapped the pagetable page while accessing vpte range 
+ * addresses. The idea is that if the vpte color and PAGE_OFFSET range 
+ * color is the same, then when the kernel initializes the pagetable 
+ * using the later address range, accesses with the first address
+ * range will see the newly initialized data rather than the garbage.
+ */
#ifdef DCACHE_ALIASING_POSSIBLE
/* Allocate two pages at a time so that both D-cache colors are
 * available (order-1 allocation).
 */
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif
/* Allocate a zeroed pte page for "address", honoring D-cache
 * coloring (see the block comment above).  First tries the
 * per-color quicklist; on a fresh allocation the page of the
 * wrong color is pushed back onto the opposite quicklist.
 * Returns NULL when alloc_pages() fails.
 */
pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		/* Fast path: reuse a page of the right color, if cached. */
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Split the order-1 compound page into two independent
		 * single pages so the second can be freed separately.
		 */
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		/* Pick the page whose color matches; the other one is
		 * returned to the opposite-color quicklist below.
		 */
		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}
+
+void sparc_ultra_dump_itlb(void)
+{
+        int slot;
+
+	if (tlb_type == spitfire) {
+		printk ("Contents of itlb: ");
+		for (slot = 0; slot < 14; slot++) printk ("    ");
+		printk ("%2x:%016lx,%016lx\n",
+			0,
+			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
+		for (slot = 1; slot < 64; slot+=3) {
+			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
+				slot,
+				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
+				slot+1,
+				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
+				slot+2,
+				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		printk ("Contents of itlb0:\n");
+		for (slot = 0; slot < 16; slot+=2) {
+			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+				slot,
+				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
+				slot+1,
+				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
+		}
+		printk ("Contents of itlb2:\n");
+		for (slot = 0; slot < 128; slot+=2) {
+			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+				slot,
+				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
+				slot+1,
+				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
+		}
+	}
+}
+
/* Dump the contents of the data TLB(s) to the console: spitfire has
 * a single 64-entry dtlb; cheetah-class chips have a 16-entry dtlb0
 * plus a 512-entry dtlb2, and cheetah_plus adds a 512-entry dtlb3.
 */
void sparc_ultra_dump_dtlb(void)
{
        int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		/* Three entries per output line. */
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}
+
extern unsigned long cmdline_memory_size;

/* Set up the boot-time memory allocator.
 *
 * Scans sp_banks[] to find the end of physical memory (honoring any
 * "mem=" command-line limit in cmdline_memory_size), initializes the
 * bootmem bitmap, registers each bank's memory as free, and then
 * reserves the kernel image, the initrd (if any) and the bootmem
 * bitmap itself.
 *
 * On return *pages_avail holds the number of usable pages (bootmem
 * map excluded, see comment near the end); the return value is the
 * pfn one past the last byte of physical memory.
 */
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			/* "mem=" limit: trim the current bank and truncate
			 * the bank list right here.
			 */
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		/* Convert a virtual ramdisk address back to physical. */
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
		                 	 "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			/* Move the bootmem map past the initrd if they would
			 * otherwise collide.
			 */
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif	
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	bootmap_base = bootmap_pfn << PAGE_SHIFT;

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		/* From here on initrd_start/end are virtual addresses. */
		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}
+
/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

/* Highest pfn discovered by bootmem_init(); consumed by mem_init(). */
static unsigned long last_valid_pfn;

/* Main MM bring-up for sparc64:
 *  - installs the locked 4MB dTLB mapping(s) covering the kernel image
 *    at its PAGE_OFFSET alias (one extra mapping if the image is large),
 *  - points init_mm.pgd / swapper tables at the aliased addresses,
 *  - patches the VPTE miss handler with the pgd value,
 *  - runs bootmem_init(), inherits OBP mappings, sets up the trap
 *    table, and finally calls free_area_init_node()/device_scan().
 */
void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	extern unsigned int sparc64_vpte_patchme1[1];
	extern unsigned int sparc64_vpte_patchme2[1];
	unsigned long alias_base = kern_base + PAGE_OFFSET;
	unsigned long second_alias_page = 0;
	unsigned long pt, flags, end_pfn, pages_avail;
	unsigned long shift = alias_base - ((unsigned long)KERNBASE);
	unsigned long real_end;

	/* Context 0 is the nucleus context, never hand it out. */
	set_bit(0, mmu_context_bmap);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64)
		real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
#endif

	/* We assume physical memory starts at some 4mb multiple,
	 * if this were not true we wouldn't boot up to this point
	 * anyways.
	 */
	pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
	local_irq_save(flags);
	if (tlb_type == spitfire) {
		/* Write the tag, then store the locked 4MB TTE directly
		 * into dTLB entry 61 via ASI_DTLB_DATA_ACCESS.
		 */
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			/* Kernel spills past the first 4MB: map a second
			 * 4MB chunk in entry 60.
			 */
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
			: "memory");
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Same as above but with cheetah's locked-entry indexing. */
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
			: "memory");
		}
	}
	local_irq_restore(flags);
	
	/* Now set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
	
	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_pmd_dir + (shift / sizeof(pgd_t)));
	
	/* Patch the pgd value (split into high/low pieces) into the
	 * VPTE miss handler, then flush the I-cache for the patch site.
	 */
	sparc64_vpte_patchme1[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
	sparc64_vpte_patchme2[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
	flushi((long)&sparc64_vpte_patchme1[0]);
	
	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();
	
	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	/* We only created DTLB mapping of this stuff. */
	spitfire_flush_dtlb_nucleus_page(alias_base);
	if (second_alias_page)
		spitfire_flush_dtlb_nucleus_page(second_alias_page);

	__flush_tlb_all();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		/* Everything lives in ZONE_DMA; holes are pages not
		 * available to the allocator.
		 */
		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}
+
/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
/* Sort the PROM memory list in place by ascending start_adr.
 * Selection-style: for each position i, find the entry with the
 * lowest start address among [i..end], then rotate entries i..swapi
 * right by one to insert it (preserving relative order of the rest).
 * The list is terminated by an entry with theres_more == 0.
 */
static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		/* Already in place, nothing to rotate. */
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		/* Shift entries i..swapi-1 up one slot... */
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		/* ...and drop the lowest entry into position i. */
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}
+
/* Re-read the "available" property of the /memory node from the
 * PROM and rebuild sp_banks[] from it (the PROM may have consumed
 * memory since the first scan -- see the comment above sort_memlist).
 * Bank sizes are truncated to whole pages at the end.
 */
void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	/* Chain the entries into the p1275 list format sort_memlist
	 * expects; the last entry is terminated below.
	 */
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;
  
	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}
    
		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Terminate the bank list with a sentinel entry. */
	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
+
/* Rescan the PROM memory list and reconcile it with the original
 * sp_banks[] contents: every page that was present in the old scan
 * is either marked valid in sparc64_valid_addr_bitmap (if it is still
 * covered by a new bank) or reserved in the bootmem map (if the PROM
 * has since taken it).
 */
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	/* Snapshot the current bank list before rescanning clobbers it. */
	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				/* Page still fully covered by a new bank:
				 * mark its 4MB chunk (addr >> 22) valid.
				 */
				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			/* Page vanished from the new list: PROM owns it now. */
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
+
/* Final memory initialization: allocate and fill the valid-address
 * bitmap (one bit per 4MB chunk), reconcile memory with the PROM via
 * taint_real_pages(), hand all bootmem pages to the page allocator,
 * set up the shared zero page, and print the memory banner.
 */
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	/* Bitmap size: one bit per 4MB (22 bits), 64 bits per word
	 * (hence the extra 6), rounded up by one word.
	 */
	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	/* The kernel image itself is always valid memory. */
	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	/* Compute page counts for the banner from the linker symbols. */
	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10), 
	       initpages << (PAGE_SHIFT-10), 
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
+
/* Release the __init section back to the page allocator.  Each page
 * is poisoned with 0xcc first so stray late references fault loudly.
 */
void free_initmem (void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		/* Translate from the KERNBASE mapping to the PAGE_OFFSET
		 * alias so virt_to_page() works on it.
		 */
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start < end)
+		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	for (; start < end; start += PAGE_SIZE) {
+		struct page *p = virt_to_page(start);
+
+		ClearPageReserved(p);
+		set_page_count(p, 1);
+		__free_page(p);
+		num_physpages++;
+		totalram_pages++;
+	}
+}
+#endif
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
new file mode 100644
index 0000000..90ca99d
--- /dev/null
+++ b/arch/sparc64/mm/tlb.c
@@ -0,0 +1,151 @@
+/* arch/sparc64/mm/tlb.c
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
/* Heavily inspired by the ppc64 code.  */

/* Per-cpu TLB flush batch: tlb_batch_add() accumulates virtual
 * addresses here until flush_tlb_pending() pushes them out.
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
+
+void flush_tlb_pending(void)
+{
+	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+
+	if (mp->tlb_nr) {
+		if (CTX_VALID(mp->mm->context)) {
+#ifdef CONFIG_SMP
+			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
+					      &mp->vaddrs[0]);
+#else
+			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
+					    mp->tlb_nr, &mp->vaddrs[0]);
+#endif
+		}
+		mp->tlb_nr = 0;
+	}
+}
+
/* Queue a TLB flush for "vaddr" in mm, whose old pte was "orig".
 * Bit 0 of the queued address encodes whether the page was
 * executable (the flusher uses this to also demap the I-TLB).
 * If the old pte was dirty and maps a real file page whose kernel
 * address differs from vaddr in bit 13, the D-cache is flushed to
 * avoid alias problems.  Batched entries are flushed when the batch
 * fills or when a different mm shows up.
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;	/* executable flag rides in bit 0 */

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* Differing bit 13 means the two mappings have different
		 * D-cache colors (8K pages) -- flush to kill the alias.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* Batching suspended (e.g. during full-mm teardown). */
	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	/* Batch belongs to a different mm: flush it out first. */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
+
/* Queue TLB flushes for the VPTE (linear page table) mappings that
 * cover the user virtual range [start, end).  The range is widened
 * to PMD granularity, translated into the VPTE area, and each page
 * of that area is fed into the per-cpu flush batch.
 */
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	/* Signed copies: sign-extended arithmetic handles the VA hole. */
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem.  */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	/* Batch holds a different mm's addresses: drain it first. */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	/* Each pte is 8 bytes, so a VA maps to vpte_base + (va >> 10)
	 * with 8K pages (PAGE_SHIFT - 3).
	 */
	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
new file mode 100644
index 0000000..7a09343
--- /dev/null
+++ b/arch/sparc64/mm/ultra.S
@@ -0,0 +1,583 @@
+/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
+ * ultra.S: Don't expand these all over the place...
+ *
+ * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/spitfire.h>
+#include <asm/mmu_context.h>
+#include <asm/pil.h>
+#include <asm/head.h>
+#include <asm/thread_info.h>
+#include <asm/cacheflush.h>
+
+	/* Basically, most of the Spitfire vs. Cheetah madness
+	 * has to do with the fact that Cheetah does not support
+	 * IMMU flushes out of the secondary context.  Someone needs
+	 * to throw a south lake birthday party for the folks
+	 * in Microelectronics who refused to fix this shit.
+	 */
+
+	/* This file is meant to be read efficiently by the CPU, not humans.
+	 * Staraj sie tego nikomu nie pierdolnac...
+	 */
+	.text
+	.align		32
+	/* Flush both TLBs for an entire context.  The fast path assumes
+	 * %o0 already sits in the SECONDARY_CONTEXT register; if not,
+	 * branch to the slow path which installs it (and restores the
+	 * previous value) with interrupts disabled.
+	 */
+	.globl		__flush_tlb_mm
+__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
+	ldxa		[%o1] ASI_DMMU, %g2
+	cmp		%g2, %o0
+	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
+	 mov		0x50, %g3		! demap-context, secondary
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	retl
+	 flush		%g6
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	.align		32
+	/* Demap each virtual address queued in vaddrs[] for the given
+	 * context.  Bit 0 of an entry flags that the I-TLB must be
+	 * demapped in addition to the D-TLB.
+	 */
+	.globl		__flush_tlb_pending
+__flush_tlb_pending:
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	rdpr		%pstate, %g7
+	sllx		%o1, 3, %o1
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, %pstate		! disable interrupts
+	mov		SECONDARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2	! save old context
+	stxa		%o0, [%o4] ASI_DMMU
+1:	sub		%o1, (1 << 3), %o1
+	ldx		[%o2 + %o1], %o3
+	andcc		%o3, 1, %g0
+	andn		%o3, 1, %o3
+	be,pn		%icc, 2f
+	 or		%o3, 0x10, %o3		! demap-page, secondary
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%o1, 1b
+	 nop
+	stxa		%g2, [%o4] ASI_DMMU	! restore old context
+	flush		%g6
+	retl
+	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts
+
+	.align		32
+	/* Demap every page in the kernel (nucleus) range [start, end),
+	 * walking backwards from the last page.  Nothing to do when the
+	 * range is empty.
+	 */
+	.globl		__flush_tlb_kernel_range
+__flush_tlb_kernel_range:	/* %o0=start, %o1=end */
+	cmp		%o0, %o1
+	be,pn		%xcc, 2f
+	 sethi		%hi(PAGE_SIZE), %o4
+	sub		%o1, %o0, %o3
+	sub		%o3, %o4, %o3
+	or		%o0, 0x20, %o0		! Nucleus
+1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
+	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%o3, 1b
+	 sub		%o3, %o4, %o3
+2:	retl
+	 flush		%g6
+
+	/* Slow path of __flush_tlb_mm: temporarily install the wanted
+	 * context (%o0) into the MMU register %o1 with interrupts off,
+	 * demap it, then restore the previous context value (%g2).
+	 */
+__spitfire_flush_tlb_mm_slow:
+	rdpr		%pstate, %g1
+	wrpr		%g1, PSTATE_IE, %pstate
+	stxa		%o0, [%o1] ASI_DMMU
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	flush		%g6
+	stxa		%g2, [%o1] ASI_DMMU
+	flush		%g6
+	retl
+	 wrpr		%g1, 0, %pstate
+
+/*
+ * The following code flushes one page_size worth.
+ */
+#if (PAGE_SHIFT == 13)
+#define ITAG_MASK 0xfe
+#elif (PAGE_SHIFT == 16)
+#define ITAG_MASK 0x7fe
+#else
+#error unsupported PAGE_SIZE
+#endif
+	.align		32
+	/* Flush the I-cache for one physical page: convert the physical
+	 * address to its PAGE_OFFSET kernel alias and issue a flush for
+	 * every 32-byte chunk in the page.
+	 */
+	.globl		__flush_icache_page
+__flush_icache_page:	/* %o0 = phys_page */
+	membar		#StoreStore
+	srlx		%o0, PAGE_SHIFT, %o0
+	sethi		%uhi(PAGE_OFFSET), %g1
+	sllx		%o0, PAGE_SHIFT, %o0
+	sethi		%hi(PAGE_SIZE), %g2
+	sllx		%g1, 32, %g1
+	add		%o0, %g1, %o0
+1:	subcc		%g2, 32, %g2
+	bne,pt		%icc, 1b
+	 flush		%o0 + %g2
+	retl
+	 nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+
+#if (PAGE_SHIFT != 13)
+#error only page shift of 13 is supported by dcache flush
+#endif
+
+#define DTAG_MASK 0x3
+
+	.align		64
+	/* Scan the 16K of D-cache tags four 32-byte lines per iteration;
+	 * any line whose tag matches this page's comparator value
+	 * (%o0 = (kaddr - PAGE_OFFSET) >> 11) is invalidated by the
+	 * dflushN stubs below.  Optionally flush the I-cache afterwards.
+	 */
+	.globl		__flush_dcache_page
+__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
+	sethi		%uhi(PAGE_OFFSET), %g1
+	sllx		%g1, 32, %g1
+	sub		%o0, %g1, %o0
+	clr		%o4
+	srlx		%o0, 11, %o0
+	sethi		%hi(1 << 14), %o2
+1:	ldxa		[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
+	add		%o4, (1 << 5), %o4		! IEU0
+	ldxa		[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
+	add		%o4, (1 << 5), %o4		! IEU0
+	ldxa		[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
+	add		%o4, (1 << 5), %o4		! IEU0
+	andn		%o3, DTAG_MASK, %o3		! IEU1
+	ldxa		[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
+	add		%o4, (1 << 5), %o4		! IEU0
+	andn		%g1, DTAG_MASK, %g1		! IEU1
+	cmp		%o0, %o3			! IEU1	Group
+	be,a,pn		%xcc, dflush1			! CTI
+	 sub		%o4, (4 << 5), %o4		! IEU0	(Group)
+	cmp		%o0, %g1			! IEU1	Group
+	andn		%g2, DTAG_MASK, %g2		! IEU0
+	be,a,pn		%xcc, dflush2			! CTI
+	 sub		%o4, (3 << 5), %o4		! IEU0	(Group)
+	cmp		%o0, %g2			! IEU1	Group
+	andn		%g3, DTAG_MASK, %g3		! IEU0
+	be,a,pn		%xcc, dflush3			! CTI
+	 sub		%o4, (2 << 5), %o4		! IEU0	(Group)
+	cmp		%o0, %g3			! IEU1	Group
+	be,a,pn		%xcc, dflush4			! CTI
+	 sub		%o4, (1 << 5), %o4		! IEU0
+2:	cmp		%o4, %o2			! IEU1	Group
+	bne,pt		%xcc, 1b			! CTI
+	 nop						! IEU0
+
+	/* The I-cache does not snoop local stores so we
+	 * better flush that too when necessary.
+	 */
+	brnz,pt		%o1, __flush_icache_page
+	 sllx		%o0, 11, %o0
+	retl
+	 nop
+
+	/* Invalidate the matching line(s), then resume the tag scan. */
+dflush1:stxa		%g0, [%o4] ASI_DCACHE_TAG
+	add		%o4, (1 << 5), %o4
+dflush2:stxa		%g0, [%o4] ASI_DCACHE_TAG
+	add		%o4, (1 << 5), %o4
+dflush3:stxa		%g0, [%o4] ASI_DCACHE_TAG
+	add		%o4, (1 << 5), %o4
+dflush4:stxa		%g0, [%o4] ASI_DCACHE_TAG
+	add		%o4, (1 << 5), %o4
+	membar		#Sync
+	ba,pt		%xcc, 2b
+	 nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	.align		32
+	/* Pre-load a D-TLB entry: with interrupts disabled, write the
+	 * TLB tag (%o5 = vaddr | context) to TLB_TAG_ACCESS and push
+	 * the PTE (%o2) into the data-in register.
+	 */
+__prefill_dtlb:
+	rdpr		%pstate, %g7
+	wrpr		%g7, PSTATE_IE, %pstate
+	mov		TLB_TAG_ACCESS, %g1
+	stxa		%o5, [%g1] ASI_DMMU
+	stxa		%o2, [%g0] ASI_DTLB_DATA_IN
+	flush		%g6
+	retl
+	 wrpr		%g7, %pstate
+	/* Same as above, but for the I-TLB. */
+__prefill_itlb:
+	rdpr		%pstate, %g7
+	wrpr		%g7, PSTATE_IE, %pstate
+	mov		TLB_TAG_ACCESS, %g1
+	stxa		%o5, [%g1] ASI_IMMU
+	stxa		%o2, [%g0] ASI_ITLB_DATA_IN
+	flush		%g6
+	retl
+	 wrpr		%g7, %pstate
+
+	/* Route the refill to the D- or I-TLB based on the fault code,
+	 * building %o5 = (page-aligned address | context) on the way.
+	 */
+	.globl		__update_mmu_cache
+__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
+	srlx		%o1, PAGE_SHIFT, %o1
+	andcc		%o3, FAULT_CODE_DTLB, %g0
+	sllx		%o1, PAGE_SHIFT, %o5
+	bne,pt		%xcc, __prefill_dtlb
+	 or		%o5, %o0, %o5
+	ba,a,pt		%xcc, __prefill_itlb
+
+	/* Cheetah specific versions, patched at boot time.
+	 *
+	 * This writes of the PRIMARY_CONTEXT register in this file are
+	 * safe even on Cheetah+ and later wrt. the page size fields.
+	 * The nucleus page size fields do not matter because we make
+	 * no data references, and these instructions execute out of a
+	 * locked I-TLB entry sitting in the fully associative I-TLB.
+	 * This sequence should also never trap.
+	 */
+__cheetah_flush_tlb_mm: /* 15 insns */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate	! disable interrupts
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o2
+	mov		0x40, %g3		! demap-context, primary
+	ldxa		[%o2] ASI_DMMU, %g2	! save old context
+	stxa		%o0, [%o2] ASI_DMMU
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	stxa		%g2, [%o2] ASI_DMMU	! restore old context
+	flush		%g6
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
+__cheetah_flush_tlb_pending:	/* 22 insns */
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	rdpr		%pstate, %g7
+	sllx		%o1, 3, %o1
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	stxa		%o0, [%o4] ASI_DMMU
+1:	sub		%o1, (1 << 3), %o1
+	ldx		[%o2 + %o1], %o3
+	andcc		%o3, 1, %g0		! bit 0 set: also demap I-TLB
+	be,pn		%icc, 2f
+	 andn		%o3, 1, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	brnz,pt		%o1, 1b
+	 membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	flush		%g6
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	/* Cheetah can invalidate D-cache lines by physical address, so
+	 * simply walk the page one 32-byte line at a time.
+	 */
+flush_dcpage_cheetah: /* 11 insns */
+	sethi		%uhi(PAGE_OFFSET), %g1
+	sllx		%g1, 32, %g1
+	sub		%o0, %g1, %o0
+	sethi		%hi(PAGE_SIZE), %o4
+1:	subcc		%o4, (1 << 5), %o4
+	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
+	membar		#Sync
+	bne,pt		%icc, 1b
+	 nop
+	retl		/* I-cache flush never needed on Cheetah, see callers. */
+	 nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	/* Copy %o2 instructions from %o1 to %o0, flushing the I-cache
+	 * for each patched word.
+	 */
+cheetah_patch_one:
+1:	lduw		[%o1], %g1
+	stw		%g1, [%o0]
+	flush		%o0
+	subcc		%o2, 1, %o2
+	add		%o1, 4, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 4, %o0
+	retl
+	 nop
+
+	/* At boot, overwrite the generic cache/TLB routines with the
+	 * Cheetah variants.  The instruction counts passed in %o2 must
+	 * match the "/ * NN insns * /" annotations above.
+	 */
+	.globl		cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+	save		%sp, -128, %sp
+
+	sethi		%hi(__flush_tlb_mm), %o0
+	or		%o0, %lo(__flush_tlb_mm), %o0
+	sethi		%hi(__cheetah_flush_tlb_mm), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
+	call		cheetah_patch_one
+	 mov		15, %o2
+
+	sethi		%hi(__flush_tlb_pending), %o0
+	or		%o0, %lo(__flush_tlb_pending), %o0
+	sethi		%hi(__cheetah_flush_tlb_pending), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
+	call		cheetah_patch_one
+	 mov		22, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	sethi		%hi(__flush_dcache_page), %o0
+	or		%o0, %lo(__flush_dcache_page), %o0
+	sethi		%hi(flush_dcpage_cheetah), %o1
+	or		%o1, %lo(flush_dcpage_cheetah), %o1
+	call		cheetah_patch_one
+	 mov		11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	ret
+	 restore
+
+#ifdef CONFIG_SMP
+	/* These are all called by the slaves of a cross call, at
+	 * trap level 1, with interrupts fully disabled.
+	 *
+	 * Register usage:
+	 *   %g5	mm->context	(all tlb flushes)
+	 *   %g1	address arg 1	(tlb page and range flushes)
+	 *   %g7	address arg 2	(tlb range flush only)
+	 *
+	 *   %g6	ivector table, don't touch
+	 *   %g2	scratch 1
+	 *   %g3	scratch 2
+	 *   %g4	scratch 3
+	 *
+	 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
+	 */
+	.align		32
+	/* Demap the whole context %g5 from the local TLBs; the primary
+	 * context register is borrowed and then restored.
+	 */
+	.globl		xcall_flush_tlb_mm
+xcall_flush_tlb_mm:
+	mov		PRIMARY_CONTEXT, %g2
+	mov		0x40, %g4		! demap-context, primary
+	ldxa		[%g2] ASI_DMMU, %g3
+	stxa		%g5, [%g2] ASI_DMMU
+	stxa		%g0, [%g4] ASI_DMMU_DEMAP
+	stxa		%g0, [%g4] ASI_IMMU_DEMAP
+	stxa		%g3, [%g2] ASI_DMMU
+	retry
+
+	/* Demap each queued vaddr; bit 0 of an entry requests an I-TLB
+	 * demap as well.  Note %g5 is reused as scratch inside the loop
+	 * once the context has been installed.
+	 */
+	.globl		xcall_flush_tlb_pending
+xcall_flush_tlb_pending:
+	/* %g5=context, %g1=nr, %g7=vaddrs[] */
+	sllx		%g1, 3, %g1
+	mov		PRIMARY_CONTEXT, %g4
+	ldxa		[%g4] ASI_DMMU, %g2
+	stxa		%g5, [%g4] ASI_DMMU
+1:	sub		%g1, (1 << 3), %g1
+	ldx		[%g7 + %g1], %g5
+	andcc		%g5, 0x1, %g0
+	be,pn		%icc, 2f
+
+	 andn		%g5, 0x1, %g5
+	stxa		%g0, [%g5] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%g1, 1b
+	 nop
+	stxa		%g2, [%g4] ASI_DMMU
+	retry
+
+	/* Page-align %g1 (start) and %g7 (end), then demap every page
+	 * of the nucleus range, walking backwards.
+	 */
+	.globl		xcall_flush_tlb_kernel_range
+xcall_flush_tlb_kernel_range:
+	sethi		%hi(PAGE_SIZE - 1), %g2
+	or		%g2, %lo(PAGE_SIZE - 1), %g2
+	andn		%g1, %g2, %g1
+	andn		%g7, %g2, %g7
+	sub		%g7, %g1, %g3
+	add		%g2, 1, %g2
+	sub		%g3, %g2, %g3
+	or		%g1, 0x20, %g1		! Nucleus
+1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%g3, 1b
+	 sub		%g3, %g2, %g3
+	retry
+	nop
+	nop
+
+	/* This runs in a very controlled environment, so we do
+	 * not need to worry about BH races etc.
+	 */
+	.globl		xcall_sync_tick
+xcall_sync_tick:
+	rdpr		%pstate, %g2
+	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+	call		smp_synchronize_tick_client
+	 nop
+	clr		%l6
+	b		rtrap_xcall
+	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+
+	/* NOTE: This is SPECIAL!!  We do etrap/rtrap however
+	 *       we choose to deal with the "BH's run with
+	 *       %pil==15" problem (described in asm/pil.h)
+	 *       by just invoking rtrap directly past where
+	 *       BH's are checked for.
+	 *
+	 *       We do it like this because we do not want %pil==15
+	 *       lockups to prevent regs being reported.
+	 */
+	.globl		xcall_report_regs
+xcall_report_regs:
+	rdpr		%pstate, %g2
+	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+	call		__show_regs
+	 add		%sp, PTREGS_OFF, %o0
+	clr		%l6
+	/* Has to be a non-v9 branch due to the large distance. */
+	b		rtrap_xcall
+	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	.align		32
+	/* Invalidate every 32-byte D-cache line of the page by physical
+	 * address (Cheetah supports displacement-free invalidation).
+	 */
+	.globl		xcall_flush_dcache_page_cheetah
+xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
+	sethi		%hi(PAGE_SIZE), %g3
+1:	subcc		%g3, (1 << 5), %g3
+	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
+	membar		#Sync
+	bne,pt		%icc, 1b
+	 nop
+	retry
+	nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	/* Scan the D-cache tags for lines belonging to the page and
+	 * invalidate them; if %g5 says the page has a mapping, flush
+	 * the I-cache over the kernel alias as well.
+	 */
+	.globl		xcall_flush_dcache_page_spitfire
+xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
+				     %g7 == kernel page virtual address
+				     %g5 == (page->mapping != NULL)  */
+#ifdef DCACHE_ALIASING_POSSIBLE
+	srlx		%g1, (13 - 2), %g1	! Form tag comparator
+	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
+	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
+1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
+	andcc		%g2, 0x3, %g0
+	be,pn		%xcc, 2f
+	 andn		%g2, 0x3, %g2
+	cmp		%g2, %g1
+
+	bne,pt		%xcc, 2f
+	 nop
+	stxa		%g0, [%g3] ASI_DCACHE_TAG
+	membar		#Sync
+2:	cmp		%g3, 0
+	bne,pt		%xcc, 1b
+	 sub		%g3, (1 << 5), %g3
+
+	brz,pn		%g5, 2f
+#endif /* DCACHE_ALIASING_POSSIBLE */
+	 sethi		%hi(PAGE_SIZE), %g3
+
+1:	flush		%g7
+	subcc		%g3, (1 << 5), %g3
+	bne,pt		%icc, 1b
+	 add		%g7, (1 << 5), %g7
+
+2:	retry
+	nop
+	nop
+
+	.globl		xcall_promstop
+xcall_promstop:
+	rdpr		%pstate, %g2
+	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+	flushw
+	call		prom_stopself
+	 nop
+	/* We should not return, just spin if we do... */
+1:	b,a,pt		%xcc, 1b
+	nop
+
+	.data
+
+errata32_hwbug:
+	.xword	0
+
+	.text
+
+	/* These two are not performance critical... */
+	/* Walk every D-TLB and I-TLB entry by index, clearing each one
+	 * that is not locked (_PAGE_L).
+	 */
+	.globl		xcall_flush_tlb_all_spitfire
+xcall_flush_tlb_all_spitfire:
+	/* Spitfire Errata #32 workaround. */
+	sethi		%hi(errata32_hwbug), %g4
+	stx		%g0, [%g4 + %lo(errata32_hwbug)]
+
+	clr		%g2
+	clr		%g3
+1:	ldxa		[%g3] ASI_DTLB_DATA_ACCESS, %g4
+	and		%g4, _PAGE_L, %g5
+	brnz,pn		%g5, 2f
+	 mov		TLB_TAG_ACCESS, %g7
+
+	stxa		%g0, [%g7] ASI_DMMU
+	membar		#Sync
+	stxa		%g0, [%g3] ASI_DTLB_DATA_ACCESS
+	membar		#Sync
+
+	/* Spitfire Errata #32 workaround. */
+	sethi		%hi(errata32_hwbug), %g4
+	stx		%g0, [%g4 + %lo(errata32_hwbug)]
+
+2:	ldxa		[%g3] ASI_ITLB_DATA_ACCESS, %g4
+	and		%g4, _PAGE_L, %g5
+	brnz,pn		%g5, 2f
+	 mov		TLB_TAG_ACCESS, %g7
+
+	stxa		%g0, [%g7] ASI_IMMU
+	membar		#Sync
+	stxa		%g0, [%g3] ASI_ITLB_DATA_ACCESS
+	membar		#Sync
+
+	/* Spitfire Errata #32 workaround. */
+	sethi		%hi(errata32_hwbug), %g4
+	stx		%g0, [%g4 + %lo(errata32_hwbug)]
+
+2:	add		%g2, 1, %g2
+	cmp		%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
+	ble,pt		%icc, 1b
+	 sll		%g2, 3, %g3
+	flush		%g6
+	retry
+
+	.globl		xcall_flush_tlb_all_cheetah
+xcall_flush_tlb_all_cheetah:
+	mov		0x80, %g2		! demap-all
+	stxa		%g0, [%g2] ASI_DMMU_DEMAP
+	stxa		%g0, [%g2] ASI_IMMU_DEMAP
+	retry
+
+	/* These just get rescheduled to PIL vectors. */
+	.globl		xcall_call_function
+xcall_call_function:
+	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
+	retry
+
+	.globl		xcall_receive_signal
+xcall_receive_signal:
+	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
+	retry
+
+	.globl		xcall_capture
+xcall_capture:
+	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
+	retry
+
+#endif /* CONFIG_SMP */
diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig
new file mode 100644
index 0000000..5ade198
--- /dev/null
+++ b/arch/sparc64/oprofile/Kconfig
@@ -0,0 +1,23 @@
+
+menu "Profiling support"
+	depends on EXPERIMENTAL
+
+config PROFILING
+	bool "Profiling support (EXPERIMENTAL)"
+	help
+	  Say Y here to enable the extended profiling support mechanisms used
+	  by profilers such as OProfile.
+	  
+
+config OPROFILE
+	tristate "OProfile system profiling (EXPERIMENTAL)"
+	depends on PROFILING
+	help
+	  OProfile is a profiling system capable of profiling the
+	  whole system, including the kernel, kernel modules, libraries,
+	  and applications.
+
+	  If unsure, say N.
+
+endmenu
+
diff --git a/arch/sparc64/oprofile/Makefile b/arch/sparc64/oprofile/Makefile
new file mode 100644
index 0000000..e9feca1
--- /dev/null
+++ b/arch/sparc64/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+		oprof.o cpu_buffer.o buffer_sync.o \
+		event_buffer.o oprofile_files.o \
+		oprofilefs.o oprofile_stats.o \
+		timer_int.o )
+
+oprofile-y				:= $(DRIVER_OBJS) init.o
diff --git a/arch/sparc64/oprofile/init.c b/arch/sparc64/oprofile/init.c
new file mode 100644
index 0000000..9ab815b
--- /dev/null
+++ b/arch/sparc64/oprofile/init.c
@@ -0,0 +1,23 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+ 
+/* sparc64 has no architecture-specific oprofile driver; report
+ * -ENODEV and let the generic oprofile code decide what to do.
+ */
+int __init oprofile_arch_init(struct oprofile_operations * ops)
+{
+	return -ENODEV;
+}
+
+
+/* Nothing to undo, since oprofile_arch_init() never succeeds. */
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
new file mode 100644
index 0000000..8f2420d
--- /dev/null
+++ b/arch/sparc64/prom/Makefile
@@ -0,0 +1,10 @@
+# $Id: Makefile,v 1.7 2000/12/14 22:57:25 davem Exp $
+# Makefile for the Sun Boot PROM interface library under
+# Linux.
+#
+
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+lib-y   := bootstr.o devops.o init.o memory.o misc.o \
+	   tree.o console.o printf.o p1275.o map.o cif.o
diff --git a/arch/sparc64/prom/bootstr.c b/arch/sparc64/prom/bootstr.c
new file mode 100644
index 0000000..a727861
--- /dev/null
+++ b/arch/sparc64/prom/bootstr.c
@@ -0,0 +1,40 @@
+/* $Id: bootstr.c,v 1.6 1999/08/31 06:55:01 davem Exp $
+ * bootstr.c:  Boot string/argument acquisition from the PROM.
+ *
+ * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright(C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/string.h>
+#include <linux/init.h>
+#include <asm/oplib.h>
+
+/* WARNING: The boot loader knows that these next three variables come one right
+ *          after another in the .data section.  Do not move this stuff into
+ *          the .bss section or it will break things.
+ */
+
+#define BARG_LEN  256
+struct {
+	int bootstr_len;		/* size of bootstr_buf */
+	int bootstr_valid;		/* non-zero once bootstr_buf holds the args */
+	char bootstr_buf[BARG_LEN];	/* the boot command line */
+} bootstr_info = {
+	.bootstr_len = BARG_LEN,
+#ifdef CONFIG_CMDLINE
+	/* A compiled-in command line is valid from the start. */
+	.bootstr_valid = 1,
+	.bootstr_buf = CONFIG_CMDLINE,
+#endif
+};
+
+/* Return the boot command line, fetching it from the PROM "bootargs"
+ * property of /chosen on first use and caching it thereafter.
+ */
+char * __init
+prom_getbootargs(void)
+{
+	/* This check saves us from a panic when bootfd patches args. */
+	if (bootstr_info.bootstr_valid)
+		return bootstr_info.bootstr_buf;
+	prom_getstring(prom_chosen_node, "bootargs",
+		       bootstr_info.bootstr_buf, BARG_LEN);
+	bootstr_info.bootstr_valid = 1;
+	return bootstr_info.bootstr_buf;
+}
diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S
new file mode 100644
index 0000000..29d0ae7
--- /dev/null
+++ b/arch/sparc64/prom/cif.S
@@ -0,0 +1,225 @@
+/* cif.S: PROM entry/exit assembler trampolines.
+ *
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+#include <asm/pstate.h>
+
+	.text
+	/* prom_cif_interface: call into the PROM client interface.
+	 * Switch to the saved PROM stack, spill the kernel's four global
+	 * register sets (alternate/interrupt/normal/MMU) into the new
+	 * frame, disable interrupts, invoke the CIF handler on the
+	 * p1275buf argument array, then restore everything and return.
+	 */
+	.globl	prom_cif_interface
+prom_cif_interface:
+	sethi	%hi(p1275buf), %o0
+	or	%o0, %lo(p1275buf), %o0
+	ldx	[%o0 + 0x010], %o1	! prom_cif_stack
+	save	%o1, -0x190, %sp
+	ldx	[%i0 + 0x008], %l2	! prom_cif_handler
+	rdpr	%pstate, %l4
+	wrpr	%g0, 0x15, %pstate	! save alternate globals
+	stx	%g1, [%sp + 2047 + 0x0b0]
+	stx	%g2, [%sp + 2047 + 0x0b8]
+	stx	%g3, [%sp + 2047 + 0x0c0]
+	stx	%g4, [%sp + 2047 + 0x0c8]
+	stx	%g5, [%sp + 2047 + 0x0d0]
+	stx	%g6, [%sp + 2047 + 0x0d8]
+	stx	%g7, [%sp + 2047 + 0x0e0]
+	wrpr	%g0, 0x814, %pstate	! save interrupt globals
+	stx	%g1, [%sp + 2047 + 0x0e8]
+	stx	%g2, [%sp + 2047 + 0x0f0]
+	stx	%g3, [%sp + 2047 + 0x0f8]
+	stx	%g4, [%sp + 2047 + 0x100]
+	stx	%g5, [%sp + 2047 + 0x108]
+	stx	%g6, [%sp + 2047 + 0x110]
+	stx	%g7, [%sp + 2047 + 0x118]
+	wrpr	%g0, 0x14, %pstate	! save normal globals
+	stx	%g1, [%sp + 2047 + 0x120]
+	stx	%g2, [%sp + 2047 + 0x128]
+	stx	%g3, [%sp + 2047 + 0x130]
+	stx	%g4, [%sp + 2047 + 0x138]
+	stx	%g5, [%sp + 2047 + 0x140]
+	stx	%g6, [%sp + 2047 + 0x148]
+	stx	%g7, [%sp + 2047 + 0x150]
+	wrpr	%g0, 0x414, %pstate	! save mmu globals
+	stx	%g1, [%sp + 2047 + 0x158]
+	stx	%g2, [%sp + 2047 + 0x160]
+	stx	%g3, [%sp + 2047 + 0x168]
+	stx	%g4, [%sp + 2047 + 0x170]
+	stx	%g5, [%sp + 2047 + 0x178]
+	stx	%g6, [%sp + 2047 + 0x180]
+	stx	%g7, [%sp + 2047 + 0x188]
+	mov	%g1, %l0		! also save to locals, so we can handle
+	mov	%g2, %l1		! tlb faults later on, when accessing
+	mov	%g3, %l3		! the stack.
+	mov	%g7, %l5
+	wrpr	%l4, PSTATE_IE, %pstate	! turn off interrupts
+	call	%l2
+	 add	%i0, 0x018, %o0		! prom_args
+	wrpr	%g0, 0x414, %pstate	! restore mmu globals
+	mov	%l0, %g1
+	mov	%l1, %g2
+	mov	%l3, %g3
+	mov	%l5, %g7
+	wrpr	%g0, 0x14, %pstate	! restore normal globals
+	ldx	[%sp + 2047 + 0x120], %g1
+	ldx	[%sp + 2047 + 0x128], %g2
+	ldx	[%sp + 2047 + 0x130], %g3
+	ldx	[%sp + 2047 + 0x138], %g4
+	ldx	[%sp + 2047 + 0x140], %g5
+	ldx	[%sp + 2047 + 0x148], %g6
+	ldx	[%sp + 2047 + 0x150], %g7
+	wrpr	%g0, 0x814, %pstate	! restore interrupt globals
+	ldx	[%sp + 2047 + 0x0e8], %g1
+	ldx	[%sp + 2047 + 0x0f0], %g2
+	ldx	[%sp + 2047 + 0x0f8], %g3
+	ldx	[%sp + 2047 + 0x100], %g4
+	ldx	[%sp + 2047 + 0x108], %g5
+	ldx	[%sp + 2047 + 0x110], %g6
+	ldx	[%sp + 2047 + 0x118], %g7
+	wrpr	%g0, 0x15, %pstate	! restore alternate globals
+	ldx	[%sp + 2047 + 0x0b0], %g1
+	ldx	[%sp + 2047 + 0x0b8], %g2
+	ldx	[%sp + 2047 + 0x0c0], %g3
+	ldx	[%sp + 2047 + 0x0c8], %g4
+	ldx	[%sp + 2047 + 0x0d0], %g5
+	ldx	[%sp + 2047 + 0x0d8], %g6
+	ldx	[%sp + 2047 + 0x0e0], %g7
+	wrpr	%l4, 0, %pstate	! restore original pstate
+	ret
+	 restore
+
+	/* prom_cif_callback: trampoline used when the PROM calls back
+	 * into the kernel.  Save the PROM's global register sets, load
+	 * the Linux values previously stashed above the stack bias
+	 * (offsets 0x190-0x268), install the Linux trap table, run the
+	 * callback bracketed by prom_world(0)/prom_world(1), then put
+	 * the PROM globals, trap table and %pstate back before
+	 * returning the callback's result.
+	 */
+	.globl	prom_cif_callback
+prom_cif_callback:
+	sethi	%hi(p1275buf), %o1
+	or	%o1, %lo(p1275buf), %o1
+	save	%sp, -0x270, %sp
+	rdpr	%pstate, %l4
+	wrpr	%g0, 0x15, %pstate	! save PROM alternate globals
+	stx	%g1, [%sp + 2047 + 0x0b0]
+	stx	%g2, [%sp + 2047 + 0x0b8]
+	stx	%g3, [%sp + 2047 + 0x0c0]
+	stx	%g4, [%sp + 2047 + 0x0c8]
+	stx	%g5, [%sp + 2047 + 0x0d0]
+	stx	%g6, [%sp + 2047 + 0x0d8]
+	stx	%g7, [%sp + 2047 + 0x0e0]
+					! restore Linux alternate globals
+	ldx	[%sp + 2047 + 0x190], %g1
+	ldx	[%sp + 2047 + 0x198], %g2
+	ldx	[%sp + 2047 + 0x1a0], %g3
+	ldx	[%sp + 2047 + 0x1a8], %g4
+	ldx	[%sp + 2047 + 0x1b0], %g5
+	ldx	[%sp + 2047 + 0x1b8], %g6
+	ldx	[%sp + 2047 + 0x1c0], %g7
+	wrpr	%g0, 0x814, %pstate	! save PROM interrupt globals
+	stx	%g1, [%sp + 2047 + 0x0e8]
+	stx	%g2, [%sp + 2047 + 0x0f0]
+	stx	%g3, [%sp + 2047 + 0x0f8]
+	stx	%g4, [%sp + 2047 + 0x100]
+	stx	%g5, [%sp + 2047 + 0x108]
+	stx	%g6, [%sp + 2047 + 0x110]
+	stx	%g7, [%sp + 2047 + 0x118]
+					! restore Linux interrupt globals
+	ldx	[%sp + 2047 + 0x1c8], %g1
+	ldx	[%sp + 2047 + 0x1d0], %g2
+	ldx	[%sp + 2047 + 0x1d8], %g3
+	ldx	[%sp + 2047 + 0x1e0], %g4
+	ldx	[%sp + 2047 + 0x1e8], %g5
+	ldx	[%sp + 2047 + 0x1f0], %g6
+	ldx	[%sp + 2047 + 0x1f8], %g7
+	wrpr	%g0, 0x14, %pstate	! save PROM normal globals
+	stx	%g1, [%sp + 2047 + 0x120]
+	stx	%g2, [%sp + 2047 + 0x128]
+	stx	%g3, [%sp + 2047 + 0x130]
+	stx	%g4, [%sp + 2047 + 0x138]
+	stx	%g5, [%sp + 2047 + 0x140]
+	stx	%g6, [%sp + 2047 + 0x148]
+	stx	%g7, [%sp + 2047 + 0x150]
+					! restore Linux normal globals
+	ldx	[%sp + 2047 + 0x200], %g1
+	ldx	[%sp + 2047 + 0x208], %g2
+	ldx	[%sp + 2047 + 0x210], %g3
+	ldx	[%sp + 2047 + 0x218], %g4
+	ldx	[%sp + 2047 + 0x220], %g5
+	ldx	[%sp + 2047 + 0x228], %g6
+	ldx	[%sp + 2047 + 0x230], %g7
+	wrpr	%g0, 0x414, %pstate	! save PROM mmu globals
+	stx	%g1, [%sp + 2047 + 0x158]
+	stx	%g2, [%sp + 2047 + 0x160]
+	stx	%g3, [%sp + 2047 + 0x168]
+	stx	%g4, [%sp + 2047 + 0x170]
+	stx	%g5, [%sp + 2047 + 0x178]
+	stx	%g6, [%sp + 2047 + 0x180]
+	stx	%g7, [%sp + 2047 + 0x188]
+					! restore Linux mmu globals
+	ldx	[%sp + 2047 + 0x238], %o0
+	ldx	[%sp + 2047 + 0x240], %o1
+	ldx	[%sp + 2047 + 0x248], %l2
+	ldx	[%sp + 2047 + 0x250], %l3
+	ldx	[%sp + 2047 + 0x258], %l5
+	ldx	[%sp + 2047 + 0x260], %l6
+	ldx	[%sp + 2047 + 0x268], %l7
+					! switch to Linux tba
+	sethi	%hi(sparc64_ttable_tl0), %l1
+	rdpr	%tba, %l0		! save PROM tba
+	mov	%o0, %g1
+	mov	%o1, %g2
+	mov	%l2, %g3
+	mov	%l3, %g4
+	mov	%l5, %g5
+	mov	%l6, %g6
+	mov	%l7, %g7
+	wrpr	%l1, %tba		! install Linux tba
+	wrpr	%l4, 0, %pstate		! restore PSTATE
+	call	prom_world
+	 mov	%g0, %o0
+	ldx	[%i1 + 0x000], %l2	! the kernel callback function
+	call	%l2
+	 mov	%i0, %o0
+	mov	%o0, %l1		! stash the callback's return value
+	call	prom_world
+	 or	%g0, 1, %o0
+	wrpr	%g0, 0x14, %pstate	! interrupts off
+					! restore PROM mmu globals
+	ldx	[%sp + 2047 + 0x158], %o0
+	ldx	[%sp + 2047 + 0x160], %o1
+	ldx	[%sp + 2047 + 0x168], %l2
+	ldx	[%sp + 2047 + 0x170], %l3
+	ldx	[%sp + 2047 + 0x178], %l5
+	ldx	[%sp + 2047 + 0x180], %l6
+	ldx	[%sp + 2047 + 0x188], %l7
+	wrpr	%g0, 0x414, %pstate	! restore PROM mmu globals
+	mov	%o0, %g1
+	mov	%o1, %g2
+	mov	%l2, %g3
+	mov	%l3, %g4
+	mov	%l5, %g5
+	mov	%l6, %g6
+	mov	%l7, %g7
+	wrpr	%l0, %tba		! restore PROM tba
+	wrpr	%g0, 0x14, %pstate	! restore PROM normal globals
+	ldx	[%sp + 2047 + 0x120], %g1
+	ldx	[%sp + 2047 + 0x128], %g2
+	ldx	[%sp + 2047 + 0x130], %g3
+	ldx	[%sp + 2047 + 0x138], %g4
+	ldx	[%sp + 2047 + 0x140], %g5
+	ldx	[%sp + 2047 + 0x148], %g6
+	ldx	[%sp + 2047 + 0x150], %g7
+	wrpr	%g0, 0x814, %pstate	! restore PROM interrupt globals
+	ldx	[%sp + 2047 + 0x0e8], %g1
+	ldx	[%sp + 2047 + 0x0f0], %g2
+	ldx	[%sp + 2047 + 0x0f8], %g3
+	ldx	[%sp + 2047 + 0x100], %g4
+	ldx	[%sp + 2047 + 0x108], %g5
+	ldx	[%sp + 2047 + 0x110], %g6
+	ldx	[%sp + 2047 + 0x118], %g7
+	wrpr	%g0, 0x15, %pstate	! restore PROM alternate globals
+	ldx	[%sp + 2047 + 0x0b0], %g1
+	ldx	[%sp + 2047 + 0x0b8], %g2
+	ldx	[%sp + 2047 + 0x0c0], %g3
+	ldx	[%sp + 2047 + 0x0c8], %g4
+	ldx	[%sp + 2047 + 0x0d0], %g5
+	ldx	[%sp + 2047 + 0x0d8], %g6
+	ldx	[%sp + 2047 + 0x0e0], %g7
+	wrpr	%l4, 0, %pstate
+	ret
+	 restore %l1, 0, %o0
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
new file mode 100644
index 0000000..028a53f
--- /dev/null
+++ b/arch/sparc64/prom/console.c
@@ -0,0 +1,146 @@
+/* $Id: console.c,v 1.9 1997/10/29 07:41:43 ecd Exp $
+ * console.c: Routines that deal with sending and receiving IO
+ *            to/from the current console device using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <linux/string.h>
+
+extern int prom_stdin, prom_stdout;
+
+/* Non blocking get character from console input device, returns -1
+ * if no input was taken.  This can be used for polling.
+ */
+__inline__ int
+prom_nbgetchar(void)
+{
+	char inc;
+
+	/* "read" reports how many bytes were transferred; exactly one
+	 * byte means we got a character.
+	 */
+	if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
+			      P1275_INOUT(3,1),
+			      prom_stdin, &inc, P1275_SIZE(1)) == 1)
+		return inc;
+	else
+		return -1;
+}
+
+/* Non blocking put character to console device, returns -1 if
+ * unsuccessful.
+ */
+__inline__ int
+prom_nbputchar(char c)
+{
+	char outc;
+	
+	outc = c;	/* "write" wants a buffer, not a value */
+	if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+			       P1275_INOUT(3,1),
+			       prom_stdout, &outc, P1275_SIZE(1)) == 1)
+		return 0;
+	else
+		return -1;
+}
+
+/* Blocking version of get character routine above. */
+char
+prom_getchar(void)
+{
+	int character;
+	/* Spin until the PROM has a character for us. */
+	while((character = prom_nbgetchar()) == -1) ;
+	return (char) character;
+}
+
+/* Blocking version of put character routine above. */
+void
+prom_putchar(char c)
+{
+	/* Best effort: the result of prom_nbputchar() is ignored. */
+	prom_nbputchar(c);
+	return;
+}
+
+/* Write the len-byte buffer s to the PROM's stdout. */
+void
+prom_puts(char *s, int len)
+{
+	p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+			   P1275_INOUT(3,1),
+			   prom_stdout, s, P1275_SIZE(len));
+}
+
+/* Query for input device type */
+/* Classify the console input device: keyboard, serial tty (a/b), or
+ * unknown, by inspecting the stdin node and the "input-device" option.
+ */
+enum prom_input_device
+prom_query_input_device(void)
+{
+	int st_p;
+	char propb[64];
+
+	st_p = prom_inst2pkg(prom_stdin);
+	if(prom_node_has_property(st_p, "keyboard"))
+		return PROMDEV_IKBD;
+	/* Non-keyboard, non-serial stdin is something we can't name. */
+	prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+	if(strncmp(propb, "serial", 6))
+		return PROMDEV_I_UNK;
+	/* FIXME: Is there any better way how to find out? */	
+	memset(propb, 0, sizeof(propb));
+	st_p = prom_finddevice ("/options");
+	prom_getproperty(st_p, "input-device", propb, sizeof(propb));
+
+	/*
+	 * If we get here with propb == 'keyboard', we are on ttya, as
+	 * the PROM defaulted to this due to 'no input device'.
+	 */
+	if (!strncmp(propb, "keyboard", 8))
+		return PROMDEV_ITTYA;
+
+	/* Expect "ttya" or "ttyb"; anything else is unknown. */
+	if (strncmp (propb, "tty", 3) || !propb[3])
+		return PROMDEV_I_UNK;
+	switch (propb[3]) {
+		case 'a': return PROMDEV_ITTYA;
+		case 'b': return PROMDEV_ITTYB;
+		default: return PROMDEV_I_UNK;
+	}
+}
+
+/* Query for output device type */
+
+/* Classify the console output device: framebuffer, serial tty (a/b),
+ * or unknown, by inspecting the stdout node and the "output-device"
+ * option.
+ */
+enum prom_output_device
+prom_query_output_device(void)
+{
+	int st_p;
+	char propb[64];
+	int propl;
+
+	st_p = prom_inst2pkg(prom_stdout);
+	propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+	/* propl includes the NUL, hence the sizeof("display") compare. */
+	if (propl >= 0 && propl == sizeof("display") &&
+	    strncmp("display", propb, sizeof("display")) == 0)
+		return PROMDEV_OSCREEN;
+	if(strncmp("serial", propb, 6))
+		return PROMDEV_O_UNK;
+	/* FIXME: Is there any better way how to find out? */	
+	memset(propb, 0, sizeof(propb));
+	st_p = prom_finddevice ("/options");
+	prom_getproperty(st_p, "output-device", propb, sizeof(propb));
+
+	/*
+	 * If we get here with propb == 'screen', we are on ttya, as
+	 * the PROM defaulted to this due to 'no output device'.
+	 */
+	if (!strncmp(propb, "screen", 6))
+		return PROMDEV_OTTYA;
+
+	/* Expect "ttya" or "ttyb"; anything else is unknown. */
+	if (strncmp (propb, "tty", 3) || !propb[3])
+		return PROMDEV_O_UNK;
+	switch (propb[3]) {
+		case 'a': return PROMDEV_OTTYA;
+		case 'b': return PROMDEV_OTTYB;
+		default: return PROMDEV_O_UNK;
+	}
+}
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
new file mode 100644
index 0000000..2c99b21
--- /dev/null
+++ b/arch/sparc64/prom/devops.c
@@ -0,0 +1,41 @@
+/* $Id: devops.c,v 1.3 1997/10/29 07:43:28 ecd Exp $
+ * devops.c:  Device operations using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Open the device described by the string 'dstr'.  Returns the handle
+ * to that device used for subsequent operations on that device.
+ * Returns 0 on failure.
+ */
+int
+prom_devopen(char *dstr)
+{
+	/* "open" hands back the PROM instance handle (0 on failure). */
+	return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
+				  P1275_INOUT(1,1),
+				  dstr);
+}
+
+/* Close the device described by device handle 'dhandle'. */
+int
+prom_devclose(int dhandle)
+{
+	/* The PROM's result is discarded; this always reports 0. */
+	p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+	return 0;
+}
+
+/* Seek to specified location described by 'seekhi' and 'seeklo'
+ * for device 'dhandle'.
+ */
+void
+prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
+{
+	/* seekhi/seeklo are the high and low 32-bit halves of the
+	 * 64-bit offset, per the IEEE 1275 "seek" service.
+	 */
+	p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+}
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
new file mode 100644
index 0000000..817faae
--- /dev/null
+++ b/arch/sparc64/prom/init.c
@@ -0,0 +1,101 @@
+/* $Id: init.c,v 1.10 1999/09/21 14:35:59 davem Exp $
+ * init.c:  Initialize internal variables used by the PROM
+ *          library functions.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+enum prom_major_version prom_vers;
+unsigned int prom_rev, prom_prev;
+
+/* The root node of the prom device tree. */
+int prom_root_node;
+int prom_stdin, prom_stdout;
+int prom_chosen_node;
+
+/* You must call prom_init() before you attempt to use any of the
+ * routines in the prom library.  It returns 0 on success, 1 on
+ * failure.  It gets passed the pointer to the PROM vector.
+ */
+
+extern void prom_meminit(void);
+extern void prom_cif_init(void *, void *);
+
+/* Set up the PROM library: record the client interface handler and
+ * stack, locate the root and /chosen nodes, cache the stdin/stdout
+ * handles, parse the "OBP xx.yy.zz" version string from /openprom,
+ * and initialize the PROM memory lists.  Any failure halts the
+ * machine via prom_halt().
+ */
+void __init prom_init(void *cif_handler, void *cif_stack)
+{
+	char buffer[80], *p;
+	int ints[3];
+	int node;
+	int i = 0;
+	int bufadjust;
+
+	prom_vers = PROM_P1275;
+
+	prom_cif_init(cif_handler, cif_stack);
+
+	prom_root_node = prom_getsibling(0);
+	if((prom_root_node == 0) || (prom_root_node == -1))
+		prom_halt();
+
+	prom_chosen_node = prom_finddevice("/chosen");
+	if (!prom_chosen_node || prom_chosen_node == -1)
+		prom_halt();
+
+	prom_stdin = prom_getint (prom_chosen_node, "stdin");
+	prom_stdout = prom_getint (prom_chosen_node, "stdout");
+
+	node = prom_finddevice("/openprom");
+	if (!node || node == -1)
+		prom_halt();
+
+	prom_getstring (node, "version", buffer, sizeof (buffer));
+
+	prom_printf ("\n");
+
+	if (strncmp (buffer, "OBP ", 4))
+		goto strange_version;
+
+	/*
+	 * Version field is expected to be 'OBP xx.yy.zz date...'
+	 * However, Sun can't stick to this format very well, so
+	 * we need to check for 'OBP  xx.yy.zz date...' and adjust
+	 * accordingly. -spot
+	 */
+
+	if (strncmp (buffer, "OBP  ", 5))
+		bufadjust = 4;
+	else
+		bufadjust = 5;
+
+	/* Parse up to three dot-separated version components. */
+	p = buffer + bufadjust;
+	while (p && isdigit(*p) && i < 3) {
+		ints[i++] = simple_strtoul(p, NULL, 0);
+		if ((p = strchr(p, '.')) != NULL)
+			p++;
+	}
+	if (i != 3)
+		goto strange_version;
+
+	/* prom_rev holds the "yy" component; prom_prev packs the
+	 * whole xx.yy.zz triple into one word.
+	 */
+	prom_rev = ints[1];
+	prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2];
+
+	printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
+
+	prom_meminit();
+
+	/* Initialization successful. */
+	return;
+
+strange_version:
+	prom_printf ("Strange OBP version `%s'.\n", buffer);
+	prom_halt ();
+}
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S
new file mode 100644
index 0000000..21b3f9c
--- /dev/null
+++ b/arch/sparc64/prom/map.S
@@ -0,0 +1,72 @@
+/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $
+ * map.S: Tricky coding required to fixup the kernel OBP maps
+ *	  properly.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+	.text
+	.align	8192
+	.globl	prom_boot_page
+prom_boot_page:
+call_method:
+	.asciz	"call-method"
+	.align	8
+map:
+	.asciz	"map"
+	.align	8
+
+	/* When we are invoked, our caller has remapped us to
+	 * page zero, therefore we must use PC relative addressing
+	 * for everything after we begin performing the unmap/map
+	 * calls.
+	 */
+	.globl	prom_remap
+prom_remap:	/* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */
+	rd	%pc, %g1
+	srl	%o2, 0, %o2			! kill sign extension
+	sethi	%hi(p1275buf), %g2
+	or	%g2, %lo(p1275buf), %g2
+	/* Offsets 0x08/0x10 match the p1275buf layout in p1275.c. */
+	ldx	[%g2 + 0x10], %g3		! prom_cif_stack
+	save	%g3, -(192 + 128), %sp
+	ldx	[%g2 + 0x08], %l0		! prom_cif_handler
+	/* Preserve the fixed global registers across the CIF call. */
+	mov	%g6, %i3
+	mov	%g4, %i4
+	mov	%g5, %i5
+	flushw
+
+	/* Compute the current (page-zero) addresses of the two
+	 * strings PC-relatively from %g1 = &prom_remap.
+	 */
+	sethi	%hi(prom_remap - call_method), %g7
+	or	%g7, %lo(prom_remap - call_method), %g7
+	sub	%g1, %g7, %l2			! call-method string
+	sethi	%hi(prom_remap - map), %g7
+	or	%g7, %lo(prom_remap - map), %g7
+	sub	%g1, %g7, %l4			! map string
+
+	/* OK, map the 4MB region we really live at. */
+	stx	%l2, [%sp + 2047 + 128 + 0x00]	! call-method
+	mov	7, %l5
+	stx	%l5, [%sp + 2047 + 128 + 0x08]	! num_args
+	mov	1, %l5
+	stx	%l5, [%sp + 2047 + 128 + 0x10]	! num_rets
+	stx	%l4, [%sp + 2047 + 128 + 0x18]	! map
+	stx	%i2, [%sp + 2047 + 128 + 0x20]	! mmu_ihandle
+	mov	-1, %l5
+	stx	%l5, [%sp + 2047 + 128 + 0x28]	! mode == default
+	sethi	%hi(4 * 1024 * 1024), %l5
+	stx	%l5, [%sp + 2047 + 128 + 0x30]	! size
+	stx	%i1, [%sp + 2047 + 128 + 0x38]	! vaddr
+	stx	%g0, [%sp + 2047 + 128 + 0x40]	! filler
+	stx	%i0, [%sp + 2047 + 128 + 0x48]	! paddr
+	call	%l0
+	 add	%sp, (2047 + 128), %o0		! argument array
+
+	/* Restore hard-coded globals. */
+	mov	%i3, %g6
+	mov	%i4, %g4
+	mov	%i5, %g5
+
+	/* Wheee.... we are done. */
+	ret
+	restore
+
+	.align	8192
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
new file mode 100644
index 0000000..f4a8143
--- /dev/null
+++ b/arch/sparc64/prom/memory.c
@@ -0,0 +1,152 @@
+/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
+ * memory.c: Prom routine for acquiring various bits of information
+ *           about RAM on the machine, both virtual and physical.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* This routine, for consistency, returns the ram parameters in the
+ * V0 prom memory descriptor format.  I choose this format because I
+ * think it was the easiest to work with.  I feel the religious
+ * arguments now... ;)  Also, I return the linked lists sorted to
+ * prevent paging_init() upset stomach as I have not yet written
+ * the pepto-bismol kernel module yet.
+ */
+
+/* Scratch buffers for reading "reg"/"available" properties. */
+struct linux_prom64_registers prom_reg_memlist[64];
+struct linux_prom64_registers prom_reg_tmp[64];
+
+/* V0-format memory lists built by prom_meminit():
+ * total physical RAM, ranges kept by the PROM, and RAM free for us.
+ */
+struct linux_mlist_p1275 prom_phys_total[64];
+struct linux_mlist_p1275 prom_prom_taken[64];
+struct linux_mlist_p1275 prom_phys_avail[64];
+
+struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
+struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
+struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
+
+/* Top-level descriptor handed out by prom_meminfo(). */
+struct linux_mem_p1275 prom_memlist;
+
+
+/* Internal Prom library routine to sort a linux_mlist_p1275 memory
+ * list.  Used below in initialization.
+ */
+static void __init
+prom_sortmemlist(struct linux_mlist_p1275 *thislist)
+{
+	int swapi = 0;
+	int i, mitr;
+	unsigned long tmpaddr, tmpsize;
+	unsigned long lowest;
+
+	/* Selection sort by ascending start_adr; the list is
+	 * terminated by a NULL theres_more link, not a count.
+	 */
+	for(i=0; thislist[i].theres_more; i++) {
+		lowest = thislist[i].start_adr;
+		for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
+			if(thislist[mitr].start_adr < lowest) {
+				lowest = thislist[mitr].start_adr;
+				swapi = mitr;
+			}
+		/* Element i already smallest; swapi may be stale but
+		 * is then unused.
+		 */
+		if(lowest == thislist[i].start_adr) continue;
+		/* Rotate [i..swapi] right by one so the minimum lands
+		 * at i; only adr/size move, theres_more links stay.
+		 */
+		tmpaddr = thislist[swapi].start_adr;
+		tmpsize = thislist[swapi].num_bytes;
+		for(mitr = swapi; mitr > i; mitr--) {
+			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
+			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
+		}
+		thislist[i].start_adr = tmpaddr;
+		thislist[i].num_bytes = tmpsize;
+	}
+}
+
+/* Initialize the memory lists based upon the prom version. */
+void __init prom_meminit(void)
+{
+	int node = 0;
+	unsigned int iter, num_regs;
+
+	/* Physical memory currently free for kernel use. */
+	node = prom_finddevice("/memory");
+	num_regs = prom_getproperty(node, "available",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	/* NOTE(review): prom_getproperty() returns -1 on failure,
+	 * which as an unsigned count makes these loops overrun, and
+	 * an empty list would index [iter-1] == [-1] below.
+	 * Presumably the firmware always supplies 1..64 entries --
+	 * TODO confirm.
+	 */
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+	for(iter=0; iter<num_regs; iter++) {
+		prom_phys_avail[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_phys_avail[iter].num_bytes =
+			prom_reg_memlist[iter].reg_size;
+		prom_phys_avail[iter].theres_more =
+			&prom_phys_avail[iter+1];
+	}
+	prom_phys_avail[iter-1].theres_more = NULL;
+
+	/* All physical RAM installed in the machine. */
+	num_regs = prom_getproperty(node, "reg",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+	for(iter=0; iter<num_regs; iter++) {
+		prom_phys_total[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_phys_total[iter].num_bytes =
+			prom_reg_memlist[iter].reg_size;
+		prom_phys_total[iter].theres_more =
+			&prom_phys_total[iter+1];
+	}
+	prom_phys_total[iter-1].theres_more = NULL;
+
+	node = prom_finddevice("/virtual-memory");
+	num_regs = prom_getproperty(node, "available",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+
+	/* Convert available virtual areas to taken virtual
+	 * areas.  First sort, then convert.
+	 */
+	for(iter=0; iter<num_regs; iter++) {
+		prom_prom_taken[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_prom_taken[iter].num_bytes =
+			prom_reg_memlist[iter].reg_size;
+		prom_prom_taken[iter].theres_more =
+			&prom_prom_taken[iter+1];
+	}
+	prom_prom_taken[iter-1].theres_more = NULL;
+
+	prom_sortmemlist(prom_prom_taken);
+
+	/* Finally, convert: each "taken" region becomes the gap
+	 * between the end of one available region and the start of
+	 * the next.
+	 */
+	for(iter=0; iter<num_regs; iter++) {
+		prom_prom_taken[iter].start_adr =
+			prom_prom_taken[iter].start_adr +
+			prom_prom_taken[iter].num_bytes;
+		prom_prom_taken[iter].num_bytes =
+			prom_prom_taken[iter+1].start_adr -
+			prom_prom_taken[iter].start_adr;
+	}
+	/* Last taken region extends to the top of the address space. */
+	prom_prom_taken[iter-1].num_bytes =
+		-1UL - prom_prom_taken[iter-1].start_adr;
+
+	/* Sort the other two lists. */
+	prom_sortmemlist(prom_phys_total);
+	prom_sortmemlist(prom_phys_avail);
+
+	/* Link all the lists into the top-level descriptor. */
+	prom_memlist.p1275_totphys=&prom_ptot_ptr;
+	prom_memlist.p1275_prommap=&prom_ptak_ptr;
+	prom_memlist.p1275_available=&prom_pavl_ptr;
+}
+
+/* This returns a pointer to our libraries internal p1275 format
+ * memory descriptor.
+ */
+struct linux_mem_p1275 *
+prom_meminfo(void)
+{
+	/* Valid only after prom_meminit() has filled prom_memlist. */
+	return &prom_memlist;
+}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
new file mode 100644
index 0000000..19c44e9
--- /dev/null
+++ b/arch/sparc64/prom/misc.c
@@ -0,0 +1,339 @@
+/* $Id: misc.c,v 1.20 2001/09/21 03:17:07 kanoj Exp $
+ * misc.c:  Miscellaneous prom functions that don't belong
+ *          anywhere else.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+
+/* Reset and reboot the machine with the command 'bcommand'. */
+void prom_reboot(char *bcommand)
+{
+	/* "boot" takes one string argument and does not return. */
+	p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
+		  P1275_INOUT(1, 0), bcommand);
+}
+
+/* Forth evaluate the expression contained in 'fstring'. */
+void prom_feval(char *fstring)
+{
+	/* Silently ignore NULL or empty strings. */
+	if (!fstring || fstring[0] == 0)
+		return;
+	p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
+		  P1275_INOUT(1, 1), fstring);
+}
+
+/* We want to do this more nicely some day. */
+extern void (*prom_palette)(int);
+
+#ifdef CONFIG_SMP
+extern void smp_capture(void);
+extern void smp_release(void);
+#endif
+
+/* Drop into the prom, with the chance to continue with the 'go'
+ * prom command.
+ */
+void prom_cmdline(void)
+{
+	unsigned long flags;
+
+	/* No interrupts while the firmware owns the machine. */
+	local_irq_save(flags);
+
+	/* Restore the PROM's palette so its console is readable. */
+	if (!serial_console && prom_palette)
+		prom_palette(1);
+
+#ifdef CONFIG_SMP
+	/* Park the other cpus while we are in the firmware. */
+	smp_capture();
+#endif
+
+	/* Returns here when the user types 'go'. */
+	p1275_cmd("enter", P1275_INOUT(0, 0));
+
+#ifdef CONFIG_SMP
+	smp_release();
+#endif
+
+	if (!serial_console && prom_palette)
+		prom_palette(0);
+
+	local_irq_restore(flags);
+}
+
+#ifdef CONFIG_SMP
+extern void smp_promstop_others(void);
+#endif
+
+/* Drop into the prom, but completely terminate the program.
+ * No chance of continuing.
+ */
+void prom_halt(void)
+{
+#ifdef CONFIG_SMP
+	/* Give the other cpus time to park in the firmware. */
+	smp_promstop_others();
+	udelay(8000);
+#endif
+again:
+	p1275_cmd("exit", P1275_INOUT(0, 0));
+	/* "exit" should never return; retry forever if it does. */
+	goto again; /* PROM is out to get me -DaveM */
+}
+
+/* Like prom_halt(), but ask the firmware to power the machine off. */
+void prom_halt_power_off(void)
+{
+#ifdef CONFIG_SMP
+	smp_promstop_others();
+	udelay(8000);
+#endif
+	p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
+
+	/* if nothing else helps, we just halt */
+	prom_halt();
+}
+
+/* Set prom sync handler to call function 'funcp'. */
+void prom_setcallback(callback_func_t funcp)
+{
+	/* NULL would clear the handler; refuse that here. */
+	if (!funcp)
+		return;
+	p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
+		  P1275_INOUT(1, 1), funcp);
+}
+
+/* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
+ * format type.  'num_bytes' is the number of bytes that your idbuf
+ * has space for.  Returns 0xff on error.
+ */
+unsigned char prom_get_idprom(char *idbuf, int num_bytes)
+{
+	int len;
+
+	/* Refuse if the property is missing or won't fit. */
+	len = prom_getproplen(prom_root_node, "idprom");
+	if ((len >num_bytes) || (len == -1))
+		return 0xff;
+	/* First byte of the idprom data is the format type. */
+	if (!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
+		return idbuf[0];
+
+	return 0xff;
+}
+
+/* Get the major prom version number. */
+int prom_version(void)
+{
+	/* Only IEEE 1275 PROMs exist on sparc64. */
+	return PROM_P1275;
+}
+
+/* Get the prom plugin-revision (set by prom_init()). */
+int prom_getrev(void)
+{
+	return prom_rev;
+}
+
+/* Get the prom firmware print revision (set by prom_init()). */
+int prom_getprev(void)
+{
+	return prom_prev;
+}
+
+/* Install Linux trap table so PROM uses that instead of its own. */
+void prom_set_trap_table(unsigned long tba)
+{
+	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
+}
+
+/* Cached ihandle of the MMU node; 0 = not yet looked up,
+ * -1 = lookup failed.
+ */
+int mmu_ihandle_cache = 0;
+
+/* Return the ihandle of /chosen's "mmu", caching the answer. */
+int prom_get_mmu_ihandle(void)
+{
+	int node, ret;
+
+	if (mmu_ihandle_cache != 0)
+		return mmu_ihandle_cache;
+
+	node = prom_finddevice("/chosen");
+	ret = prom_getint(node, "mmu");
+	if (ret == -1 || ret == 0)
+		mmu_ihandle_cache = -1;
+	else
+		mmu_ihandle_cache = ret;
+
+	/* Note: returns the raw lookup result, not the cache. */
+	return ret;
+}
+
+/* Return the ihandle of /chosen's "memory", caching the answer
+ * (same scheme as prom_get_mmu_ihandle() above).
+ */
+static int prom_get_memory_ihandle(void)
+{
+	static int memory_ihandle_cache;
+	int node, ret;
+
+	if (memory_ihandle_cache != 0)
+		return memory_ihandle_cache;
+
+	node = prom_finddevice("/chosen");
+	ret = prom_getint(node, "memory");
+	if (ret == -1 || ret == 0)
+		memory_ihandle_cache = -1;
+	else
+		memory_ihandle_cache = ret;
+
+	return ret;
+}
+
+/* Load explicit I/D TLB entries. */
+long prom_itlb_load(unsigned long index,
+		    unsigned long tte_data,
+		    unsigned long vaddr)
+{
+	/* call-method args: method name, mmu ihandle, then the
+	 * method's own arguments in reverse order.
+	 */
+	return p1275_cmd("call-method",
+			 (P1275_ARG(0, P1275_ARG_IN_STRING) |
+			  P1275_ARG(2, P1275_ARG_IN_64B) |
+			  P1275_ARG(3, P1275_ARG_IN_64B) |
+			  P1275_INOUT(5, 1)),
+			 "SUNW,itlb-load",
+			 prom_get_mmu_ihandle(),
+			 /* And then our actual args are pushed backwards. */
+			 vaddr,
+			 tte_data,
+			 index);
+}
+
+/* Same as prom_itlb_load(), but for the data TLB. */
+long prom_dtlb_load(unsigned long index,
+		    unsigned long tte_data,
+		    unsigned long vaddr)
+{
+	return p1275_cmd("call-method",
+			 (P1275_ARG(0, P1275_ARG_IN_STRING) |
+			  P1275_ARG(2, P1275_ARG_IN_64B) |
+			  P1275_ARG(3, P1275_ARG_IN_64B) |
+			  P1275_INOUT(5, 1)),
+			 "SUNW,dtlb-load",
+			 prom_get_mmu_ihandle(),
+			 /* And then our actual args are pushed backwards. */
+			 vaddr,
+			 tte_data,
+			 index);
+}
+
+/* Ask the firmware MMU to map [vaddr, vaddr+size) to paddr with
+ * the given mode.  Returns the firmware result, with 0 mapped to
+ * -1 so 0 never looks like success to callers.
+ */
+int prom_map(int mode, unsigned long size,
+	     unsigned long vaddr, unsigned long paddr)
+{
+	int ret = p1275_cmd("call-method",
+			    (P1275_ARG(0, P1275_ARG_IN_STRING) |
+			     P1275_ARG(3, P1275_ARG_IN_64B) |
+			     P1275_ARG(4, P1275_ARG_IN_64B) |
+			     P1275_ARG(6, P1275_ARG_IN_64B) |
+			     P1275_INOUT(7, 1)),
+			    "map",
+			    prom_get_mmu_ihandle(),
+			    mode,
+			    size,
+			    vaddr,
+			    0,
+			    paddr);
+
+	if (ret == 0)
+		ret = -1;
+	return ret;
+}
+
+/* Undo a firmware mapping of [vaddr, vaddr+size). */
+void prom_unmap(unsigned long size, unsigned long vaddr)
+{
+	p1275_cmd("call-method",
+		  (P1275_ARG(0, P1275_ARG_IN_STRING) |
+		   P1275_ARG(2, P1275_ARG_IN_64B) |
+		   P1275_ARG(3, P1275_ARG_IN_64B) |
+		   P1275_INOUT(4, 0)),
+		  "unmap",
+		  prom_get_mmu_ihandle(),
+		  size,
+		  vaddr);
+}
+
+/* Set aside physical memory which is not touched or modified
+ * across soft resets.
+ */
+unsigned long prom_retain(char *name,
+			  unsigned long pa_low, unsigned long pa_high,
+			  long size, long align)
+{
+	/* XXX I don't think we return multiple values correctly.
+	 * XXX OBP supposedly returns pa_low/pa_high here, how does
+	 * XXX it work?
+	 */
+
+	/* If align is zero, the pa_low/pa_high args are passed,
+	 * else they are not.
+	 */
+	if (align == 0)
+		return p1275_cmd("SUNW,retain",
+				 (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
+				 name, pa_low, pa_high, size, align);
+	else
+		return p1275_cmd("SUNW,retain",
+				 (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
+				 name, size, align);
+}
+
+/* Get "Unumber" string for the SIMM at the given
+ * memory address.  Usually this will be of the form
+ * "Uxxxx" where xxxx is a decimal number which is
+ * etched into the motherboard next to the SIMM slot
+ * in question.
+ */
+int prom_getunumber(int syndrome_code,
+		    unsigned long phys_addr,
+		    char *buf, int buflen)
+{
+	/* call-method on the memory node; 'buf' is an output buffer
+	 * of P1275_SIZE(buflen) bytes, remaining args reversed.
+	 */
+	return p1275_cmd("call-method",
+			 (P1275_ARG(0, P1275_ARG_IN_STRING)	|
+			  P1275_ARG(3, P1275_ARG_OUT_BUF)	|
+			  P1275_ARG(6, P1275_ARG_IN_64B)	|
+			  P1275_INOUT(8, 2)),
+			 "SUNW,get-unumber", prom_get_memory_ihandle(),
+			 buflen, buf, P1275_SIZE(buflen),
+			 0, phys_addr, syndrome_code);
+}
+
+/* Power management extensions. */
+void prom_sleepself(void)
+{
+	/* Put just the calling cpu to sleep. */
+	p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
+}
+
+/* Suspend the whole system; returns the firmware status. */
+int prom_sleepsystem(void)
+{
+	return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
+}
+
+/* Resume a suspended system; returns the firmware status. */
+int prom_wakeupsystem(void)
+{
+	return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
+}
+
+#ifdef CONFIG_SMP
+/* Start the cpu described by device tree node 'cpunode' executing
+ * at 'pc' with 'o0' as its first argument.
+ */
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0)
+{
+	p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0);
+}
+
+/* Stop the calling cpu; does not return. */
+void prom_stopself(void)
+{
+	p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
+}
+
+/* Idle the calling cpu until it is resumed. */
+void prom_idleself(void)
+{
+	p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
+}
+
+/* Resume a cpu previously idled/stopped, by device tree node. */
+void prom_resumecpu(int cpunode)
+{
+	p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
+}
+#endif
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
new file mode 100644
index 0000000..59fe38b
--- /dev/null
+++ b/arch/sparc64/prom/p1275.c
@@ -0,0 +1,161 @@
+/* $Id: p1275.c,v 1.22 2001/10/18 09:40:00 davem Exp $
+ * p1275.c: Sun IEEE 1275 PROM low level interface routines
+ *
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/spitfire.h>
+#include <asm/pstate.h>
+
+/* Shared communication area with the firmware.  The field offsets
+ * noted below are hard-coded in assembly (prom/map.S loads
+ * prom_cif_handler at +0x08 and prom_cif_stack at +0x10), so the
+ * layout must not change.
+ */
+struct {
+	long prom_callback;			/* 0x00 */
+	void (*prom_cif_handler)(long *);	/* 0x08 */
+	unsigned long prom_cif_stack;		/* 0x10 */
+	unsigned long prom_args [23];		/* 0x18 */
+	char prom_buffer [3000];
+} p1275buf;
+
+extern void prom_world(int);
+
+extern void prom_cif_interface(void);
+extern void prom_cif_callback(void);
+
+/* Read the primary context register from the DMMU via an
+ * alternate-space load.
+ */
+static inline unsigned long spitfire_get_primary_context(void)
+{
+	unsigned long ctx;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=r" (ctx)
+			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+	return ctx;
+}
+
+/*
+ * This provides SMP safety on the p1275buf. prom_callback() drops this lock
+ * to allow recursive acquisition.
+ */
+DEFINE_SPINLOCK(prom_entry_lock);
+
+/* Invoke firmware service 'service'.  'fmt' encodes the call:
+ * bits 0-3 = number of input args, bits 4-7 = number of return
+ * values (both from P1275_INOUT()), and from bit 8 upward a 3-bit
+ * attribute per argument (P1275_ARG()) describing how to marshal
+ * it.  Returns the first value the firmware handed back.
+ */
+long p1275_cmd (char *service, long fmt, ...)
+{
+	char *p, *q;
+	unsigned long flags;
+	int nargs, nrets, i;
+	va_list list;
+	long attrs, x;
+	
+	p = p1275buf.prom_buffer;
+	/* Must be in kernel context (context 0) to call the PROM. */
+	BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0);
+
+	spin_lock_irqsave(&prom_entry_lock, flags);
+
+	/* Copy the service name and all indirect arguments into
+	 * prom_buffer, keeping 'p' 8-byte aligned as we go.
+	 */
+	p1275buf.prom_args[0] = (unsigned long)p;		/* service */
+	strcpy (p, service);
+	p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+	p1275buf.prom_args[1] = nargs = (fmt & 0x0f);		/* nargs */
+	p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); 	/* nrets */
+	attrs = fmt >> 8;
+	va_start(list, fmt);
+	for (i = 0; i < nargs; i++, attrs >>= 3) {
+		switch (attrs & 0x7) {
+		case P1275_ARG_NUMBER:
+			p1275buf.prom_args[i + 3] =
+						(unsigned)va_arg(list, long);
+			break;
+		case P1275_ARG_IN_64B:
+			p1275buf.prom_args[i + 3] =
+				va_arg(list, unsigned long);
+			break;
+		case P1275_ARG_IN_STRING:
+			strcpy (p, va_arg(list, char *));
+			p1275buf.prom_args[i + 3] = (unsigned long)p;
+			p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+			break;
+		case P1275_ARG_OUT_BUF:
+			/* Consumes two slots: buffer pointer and size. */
+			(void) va_arg(list, char *);
+			p1275buf.prom_args[i + 3] = (unsigned long)p;
+			x = va_arg(list, long);
+			i++; attrs >>= 3;
+			p = (char *)(((long)(p + (int)x + 7)) & ~7);
+			p1275buf.prom_args[i + 3] = x;
+			break;
+		case P1275_ARG_IN_BUF:
+			/* Consumes two slots: buffer pointer and size. */
+			q = va_arg(list, char *);
+			p1275buf.prom_args[i + 3] = (unsigned long)p;
+			x = va_arg(list, long);
+			i++; attrs >>= 3;
+			memcpy (p, q, (int)x);
+			p = (char *)(((long)(p + (int)x + 7)) & ~7);
+			p1275buf.prom_args[i + 3] = x;
+			break;
+		case P1275_ARG_OUT_32B:
+			(void) va_arg(list, char *);
+			p1275buf.prom_args[i + 3] = (unsigned long)p;
+			p += 32;
+			break;
+		case P1275_ARG_IN_FUNCTION:
+			/* Firmware calls our asm trampoline, which
+			 * dispatches to the stored callback.
+			 */
+			p1275buf.prom_args[i + 3] =
+					(unsigned long)prom_cif_callback;
+			p1275buf.prom_callback = va_arg(list, long);
+			break;
+		}
+	}
+	va_end(list);
+
+	/* Bracket the actual firmware call with prom_world(). */
+	prom_world(1);
+	prom_cif_interface();
+	prom_world(0);
+
+	/* Second pass: copy any output buffers back to the caller,
+	 * skipping over the input-only arguments.
+	 */
+	attrs = fmt >> 8;
+	va_start(list, fmt);
+	for (i = 0; i < nargs; i++, attrs >>= 3) {
+		switch (attrs & 0x7) {
+		case P1275_ARG_NUMBER:
+			(void) va_arg(list, long);
+			break;
+		case P1275_ARG_IN_STRING:
+			(void) va_arg(list, char *);
+			break;
+		case P1275_ARG_IN_FUNCTION:
+			(void) va_arg(list, long);
+			break;
+		case P1275_ARG_IN_BUF:
+			(void) va_arg(list, char *);
+			(void) va_arg(list, long);
+			i++; attrs >>= 3;
+			break;
+		case P1275_ARG_OUT_BUF:
+			p = va_arg(list, char *);
+			x = va_arg(list, long);
+			memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
+			i++; attrs >>= 3;
+			break;
+		case P1275_ARG_OUT_32B:
+			p = va_arg(list, char *);
+			memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
+			break;
+		}
+	}
+	va_end(list);
+	/* First return value lands just past the input args. */
+	x = p1275buf.prom_args [nargs + 3];
+
+	spin_unlock_irqrestore(&prom_entry_lock, flags);
+
+	return x;
+}
+
+/* Remember the firmware client-interface entry point and the stack
+ * to use when calling it; done once from prom_init().
+ */
+void prom_cif_init(void *cif_handler, void *cif_stack)
+{
+	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+}
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
new file mode 100644
index 0000000..a6df82c
--- /dev/null
+++ b/arch/sparc64/prom/printf.c
@@ -0,0 +1,47 @@
+/*
+ * printf.c:  Internal prom library printf facility.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
+ *
+ * We used to warn all over the code: DO NOT USE prom_printf(),
+ * and yet people do. Anton's banking code was outputting banks
+ * with prom_printf for most of the 2.4 lifetime. Since an effective
+ * stick is not available, we deployed a carrot: an early printk
+ * through PROM by means of -p boot option. This ought to fix it.
+ * USE printk; if you need, deploy -p.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+static char ppbuf[1024];
+
+/* Emit 'n' bytes from 'buf' on the PROM console, expanding each
+ * '\n' into a "\r\n" sequence.
+ */
+void
+prom_write(const char *buf, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; i++) {
+		char c = buf[i];
+
+		if (c == '\n')
+			prom_putchar('\r');
+		prom_putchar(c);
+	}
+}
+
+/* printf-style output to the PROM console via a static 1KB buffer.
+ * Not reentrant; see the warning at the top of this file -- prefer
+ * printk in normal kernel code.
+ */
+void
+prom_printf(char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	va_start(args, fmt);
+	i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args);
+	va_end(args);
+
+	prom_write(ppbuf, i);
+}
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
new file mode 100644
index 0000000..ccf7325
--- /dev/null
+++ b/arch/sparc64/prom/tree.c
@@ -0,0 +1,377 @@
+/* $Id: tree.c,v 1.10 1998/01/10 22:39:00 ecd Exp $
+ * tree.c: Basic device tree traversal/scanning for the Linux
+ *         prom library.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Return the child of node 'node', or zero if this node has no
+ * direct descendants.
+ */
+__inline__ int
+__prom_getchild(int node)
+{
+	return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+}
+
+/* Like __prom_getchild(), but maps bad nodes (-1) and bad input
+ * to zero for callers that just walk the tree.
+ */
+__inline__ int
+prom_getchild(int node)
+{
+	int cnode;
+
+	if(node == -1) return 0;
+	cnode = __prom_getchild(node);
+	if(cnode == -1) return 0;
+	return (int)cnode;
+}
+
+/* Return the parent of node 'node', or zero on error. */
+__inline__ int
+prom_getparent(int node)
+{
+	int cnode;
+
+	if(node == -1) return 0;
+	cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
+	if(cnode == -1) return 0;
+	return (int)cnode;
+}
+
+/* Return the next sibling of node 'node' or zero if no more siblings
+ * at this level of depth in the tree.
+ */
+__inline__ int
+__prom_getsibling(int node)
+{
+	return p1275_cmd ("peer", P1275_INOUT(1, 1), node);
+}
+
+/* Like __prom_getsibling(), but maps bad nodes (-1) and bad input
+ * to zero.
+ */
+__inline__ int
+prom_getsibling(int node)
+{
+	int sibnode;
+
+	if(node == -1) return 0;
+	sibnode = __prom_getsibling(node);
+	if(sibnode == -1) return 0;
+	return sibnode;
+}
+
+/* Return the length in bytes of property 'prop' at node 'node'.
+ * Return -1 on error.
+ */
+__inline__ int
+prom_getproplen(int node, char *prop)
+{
+	if((!node) || (!prop)) return -1;
+	return p1275_cmd ("getproplen", 
+			  P1275_ARG(1,P1275_ARG_IN_STRING)|
+			  P1275_INOUT(2, 1), 
+			  node, prop);
+}
+
+/* Acquire a property 'prop' at node 'node' and place it in
+ * 'buffer' which has a size of 'bufsize'.  If the acquisition
+ * was successful the length will be returned, else -1 is returned.
+ */
+__inline__ int
+prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+{
+	int plen;
+
+	/* Check the size first; also fail on missing (-1) or
+	 * zero-length properties.
+	 */
+	plen = prom_getproplen(node, prop);
+	if((plen > bufsize) || (plen == 0) || (plen == -1))
+		return -1;
+	else {
+		/* Ok, things seem all right. */
+		return p1275_cmd ("getprop", 
+				  P1275_ARG(1,P1275_ARG_IN_STRING)|
+				  P1275_ARG(2,P1275_ARG_OUT_BUF)|
+				  P1275_INOUT(4, 1), 
+				  node, prop, buffer, P1275_SIZE(plen));
+	}
+}
+
+/* Acquire an integer property and return its value.  Returns -1
+ * on failure.
+ */
+/* Fetch property 'prop' of node 'node' as a single integer.
+ * Returns -1 if the property cannot be read.
+ */
+__inline__ int
+prom_getint(int node, char *prop)
+{
+	int value;
+
+	if (prom_getproperty(node, prop, (char *) &value, sizeof(int)) == -1)
+		return -1;
+
+	return value;
+}
+
+/* Acquire an integer property, upon error return the passed default
+ * integer.
+ */
+
+/* Like prom_getint(), but return 'deflt' instead of -1 when the
+ * property cannot be read.
+ */
+int
+prom_getintdefault(int node, char *property, int deflt)
+{
+	int value = prom_getint(node, property);
+
+	return (value == -1) ? deflt : value;
+}
+
+/* Acquire a boolean property, 1=TRUE 0=FALSE. */
+/* A boolean property is "true" precisely when it exists, so just
+ * probe its length.  1=TRUE 0=FALSE.
+ */
+int
+prom_getbool(int node, char *prop)
+{
+	return (prom_getproplen(node, prop) == -1) ? 0 : 1;
+}
+
+/* Acquire a property whose value is a string, returns a null
+ * string on error.  The char pointer is the user supplied string
+ * buffer.
+ */
+/* Copy string property 'prop' of 'node' into the user supplied
+ * buffer 'user_buf' of 'ubuf_size' bytes; on any failure the
+ * buffer is set to the empty string instead.
+ */
+void
+prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+{
+	if (prom_getproperty(node, prop, user_buf, ubuf_size) == -1)
+		user_buf[0] = 0;
+}
+
+
+/* Does the device at node 'node' have name 'name'?
+ * YES = 1   NO = 0
+ */
+/* Does the device at 'node' have the name 'name'?  YES = 1  NO = 0.
+ * NOTE(review): the property read is unchecked, so on failure this
+ * compares against stale stack bytes -- same as it always did.
+ */
+int
+prom_nodematch(int node, char *name)
+{
+	char node_name[128];
+
+	prom_getproperty(node, "name", node_name, sizeof(node_name));
+	return (strcmp(node_name, name) == 0);
+}
+
+/* Search siblings at 'node_start' for a node with name
+ * 'nodename'.  Return node if successful, zero if not.
+ */
+/* Scan 'node_start' and its siblings for a node whose "name"
+ * property equals 'nodename'.  Return the matching node, or zero
+ * if none is found.
+ */
+int
+prom_searchsiblings(int node_start, char *nodename)
+{
+	char name_buf[128];
+	int cur;
+
+	for (cur = node_start; cur; cur = prom_getsibling(cur)) {
+		if (prom_getproperty(cur, "name", name_buf,
+				     sizeof(name_buf)) == -1)
+			continue;	/* Should this ever happen? */
+		if (strcmp(nodename, name_buf) == 0)
+			return cur;
+	}
+
+	return 0;
+}
+
+/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
+int 
+prom_getname (int node, char *buffer, int len)
+{
+	int i, sbus = 0;
+	int pci = 0, ebus = 0, ide = 0;
+	struct linux_prom_registers *reg;
+	struct linux_prom64_registers reg64[PROMREG_MAX];
+	
+	/* Classify the bus this node hangs off: sbus is searched up
+	 * the whole ancestor chain, the others only at the direct
+	 * parent, and each is reset to 0 when it does not match.
+	 */
+	for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
+		i = prom_getproperty (sbus, "name", buffer, len);
+		if (i > 0) {
+			buffer [i] = 0;
+			if (!strcmp (buffer, "sbus"))
+				goto getit;
+		}
+	}
+	if ((pci = prom_getparent (node))) {
+		i = prom_getproperty (pci, "name", buffer, len);
+		if (i > 0) {
+			buffer [i] = 0;
+			if (!strcmp (buffer, "pci"))
+				goto getit;
+		}
+		pci = 0;
+	}
+	if ((ebus = prom_getparent (node))) {
+		i = prom_getproperty (ebus, "name", buffer, len);
+		if (i > 0) {
+			buffer[i] = 0;
+			if (!strcmp (buffer, "ebus"))
+				goto getit;
+		}
+		ebus = 0;
+	}
+	if ((ide = prom_getparent (node))) {
+		i = prom_getproperty (ide, "name", buffer, len);
+		if (i > 0) {
+			buffer [i] = 0;
+			if (!strcmp (buffer, "ide"))
+				goto getit;
+		}
+		ide = 0;
+	}
+getit:
+	/* Fetch the node's own name; bail with an empty string on
+	 * failure.
+	 */
+	i = prom_getproperty (node, "name", buffer, len);
+	if (i <= 0) {
+		buffer [0] = 0;
+		return -1;
+	}
+	buffer [i] = 0;
+	len -= i;
+	i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
+	if (i <= 0) return 0;
+	if (len < 16) return -1;
+	/* Append the "@unit" suffix after the name; its format
+	 * depends on the bus detected above.
+	 */
+	buffer = strchr (buffer, 0);
+	if (sbus) {
+		reg = (struct linux_prom_registers *)reg64;
+		sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
+	} else if (pci) {
+		int dev, fn;
+		reg = (struct linux_prom_registers *)reg64;
+		fn = (reg[0].which_io >> 8) & 0x07;
+		dev = (reg[0].which_io >> 11) & 0x1f;
+		if (fn)
+			sprintf (buffer, "@%x,%x", dev, fn);
+		else
+			sprintf (buffer, "@%x", dev);
+	} else if (ebus) {
+		reg = (struct linux_prom_registers *)reg64;
+		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
+	} else if (ide) {
+		reg = (struct linux_prom_registers *)reg64;
+		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
+	} else if (i == 4) {	/* Happens on 8042's children on Ultra/PCI. */
+		reg = (struct linux_prom_registers *)reg64;
+		sprintf (buffer, "@%x", reg[0].which_io);
+	} else {
+		sprintf (buffer, "@%x,%x",
+			 (unsigned int)(reg64[0].phys_addr >> 36),
+			 (unsigned int)(reg64[0].phys_addr));
+	}
+	return 0;
+}
+
+/* Return the first property type for node 'node'.
+ * buffer should be at least 32B in length
+ */
+__inline__ char *
+prom_firstprop(int node, char *buffer)
+{
+	*buffer = 0;
+	if(node == -1) return buffer;
+	/* "nextprop" with a NULL previous name yields the first one. */
+	p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
+			       P1275_INOUT(3, 0), 
+			       node, (char *) 0x0, buffer);
+	return buffer;
+}
+
+/* Return the property type string after property type 'oprop'
+ * at node 'node' .  Returns NULL string if no more
+ * property types for this node.
+ */
+__inline__ char *
+prom_nextprop(int node, char *oprop, char *buffer)
+{
+	char buf[32];
+
+	if(node == -1) {
+		*buffer = 0;
+		return buffer;
+	}
+	/* If input and output alias, copy the previous name aside
+	 * before the firmware overwrites the buffer.
+	 */
+	if (oprop == buffer) {
+		strcpy (buf, oprop);
+		oprop = buf;
+	}
+	p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+				    P1275_ARG(2,P1275_ARG_OUT_32B)|
+				    P1275_INOUT(3, 0), 
+				    node, oprop, buffer); 
+	return buffer;
+}
+
+/* Resolve device tree path 'name' to a node; zero on NULL input,
+ * firmware result (-1 on failure) otherwise.
+ */
+int
+prom_finddevice(char *name)
+{
+	if(!name) return 0;
+	return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)|
+				        P1275_INOUT(1, 1), 
+				        name);
+}
+
+/* Walk the property list of 'node' and report whether a property
+ * named 'prop' exists.  1=YES 0=NO.
+ */
+int prom_node_has_property(int node, char *prop)
+{
+	char name[32];
+
+	name[0] = 0;
+	for (;;) {
+		prom_nextprop(node, name, name);
+		if (strcmp(name, prop) == 0)
+			return 1;
+		if (name[0] == 0)
+			return 0;
+	}
+}
+                                                                                           
+/* Set property 'pname' at node 'node' to value 'value' which has a length
+ * of 'size' bytes.  Return the number of bytes the prom accepted.
+ */
+int
+prom_setprop(int node, char *pname, char *value, int size)
+{
+	/* Refuse empty values and NULL name/value pointers. */
+	if(size == 0) return 0;
+	if((pname == 0) || (value == 0)) return 0;
+	
+	return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+					  P1275_ARG(2,P1275_ARG_IN_BUF)|
+					  P1275_INOUT(4, 1), 
+					  node, pname, value, P1275_SIZE(size));
+}
+
+__inline__ int
+prom_inst2pkg(int inst)
+{
+	int node;
+	
+	/* Map an open instance handle to its package node. */
+	node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
+	if (node == -1) return 0;
+	return node;
+}
+
+/* Return 'node' assigned to a particular prom 'path'
+ * FIXME: Should work for v0 as well
+ */
+int
+prom_pathtoinode(char *path)
+{
+	int node, inst;
+
+	/* Open the path, grab its package node, then close again. */
+	inst = prom_devopen (path);
+	if (inst == 0) return 0;
+	node = prom_inst2pkg (inst);
+	prom_devclose (inst);
+	if (node == -1) return 0;
+	return node;
+}
diff --git a/arch/sparc64/solaris/Makefile b/arch/sparc64/solaris/Makefile
new file mode 100644
index 0000000..8c86630
--- /dev/null
+++ b/arch/sparc64/solaris/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Solaris binary emulation.
+#
+
+EXTRA_AFLAGS := -ansi
+
+solaris-objs := entry64.o fs.o misc.o signal.o systbl.o socket.o \
+		ioctl.o ipc.o socksys.o timod.o
+
+obj-$(CONFIG_SOLARIS_EMUL) += solaris.o
diff --git a/arch/sparc64/solaris/conv.h b/arch/sparc64/solaris/conv.h
new file mode 100644
index 0000000..5faf59a
--- /dev/null
+++ b/arch/sparc64/solaris/conv.h
@@ -0,0 +1,38 @@
+/* $Id: conv.h,v 1.4 1998/08/15 20:42:51 davem Exp $
+ * conv.h: Utility macros for Solaris emulation
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+ 
+/* #define DEBUG_SOLARIS */
+#define DEBUG_SOLARIS_KMALLOC
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+
+/* Use this to get at 32-bit user passed pointers. */
+#define A(__x)				\
+({	unsigned long __ret;		\
+	__asm__ ("srl	%0, 0, %0"	\
+		 : "=r" (__ret)		\
+		 : "0" (__x));		\
+	(void __user *)__ret;		\
+})
+
+extern unsigned sys_call_table[];
+extern unsigned sys_call_table32[];
+extern unsigned sunos_sys_table[];
+
+#define SYS(name) ((long)sys_call_table[__NR_##name])
+#define SUNOS(x) ((long)sunos_sys_table[x])
+
+#ifdef DEBUG_SOLARIS
+#define SOLD(s) printk("%s,%d,%s(): %s\n",__FILE__,__LINE__,__FUNCTION__,(s))
+#define SOLDD(s) printk("solaris: "); printk s
+#else
+#define SOLD(s)
+#define SOLDD(s)
+#endif
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S
new file mode 100644
index 0000000..0cc9dad
--- /dev/null
+++ b/arch/sparc64/solaris/entry64.S
@@ -0,0 +1,218 @@
+/* $Id: entry64.S,v 1.7 2002/02/09 19:49:31 davem Exp $
+ * entry64.S:  Solaris syscall emulation entry point.
+ *
+ * Copyright (C) 1996,1997,1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/errno.h>
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+
+#include "conv.h"
+
+#define NR_SYSCALLS	256
+
+	.text
+	/* Entry-side tracing hook: notify the tracer, then reload the
+	 * (possibly tracer-modified) arguments from the %i registers
+	 * and resume at label 2 in the syscall dispatch path.  When the
+	 * table entry's low bit is set (tested via %l3 below), %o0 is
+	 * instead loaded with a pointer to pt_regs.
+	 */
+solaris_syscall_trace:
+	call		syscall_trace
+	 nop
+	srl		%i0, 0, %o0
+	mov		%i4, %o4
+	srl		%i1, 0, %o1
+	mov		%i5, %o5
+	andcc		%l3, 1, %g0
+	be,pt		%icc, 2f
+	 srl		%i2, 0, %o2
+	b,pt		%xcc, 2f
+	 add		%sp, PTREGS_OFF, %o0
+
+solaris_sucks:
+/* Solaris is a big system which needs to be able to do all the things
+ * in Inf+1 different ways */
+	/* Syscall number 0 is the indirect syscall: the real number is
+	 * read from the user stack (0x5c above %fp) and the arguments
+	 * are shifted down by one register.  movrz clamps a zero
+	 * number to 256 so the unimplemented path is taken instead of
+	 * recursing here forever.
+	 */
+	add		%i6, 0x5c, %o0
+	mov		%i0, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	srl		%o0, 0, %o0
+	mov		%i3, %i2
+	movrz		%g1, 256, %g1 /* Ensure we don't loop forever */
+	mov		%i4, %i3
+	mov		%i5, %i4
+	ba,pt		%xcc, solaris_sparc_syscall
+exen:	 lduwa		[%o0] ASI_S, %i5
+
+	/* Fault while reading the indirect sixth argument (see the
+	 * __ex_table entry at the bottom): substitute zero.
+	 */
+exenf:	ba,pt		%xcc, solaris_sparc_syscall
+	 clr		%i5
+
+/* For shared binaries, binfmt_elf32 already sets up personality
+   and exec_domain. This is to handle static binaries as well */
+solaris_reg:
+	call		solaris_register
+	 nop
+	ba,pt		%xcc, 1f
+	 mov		%i4, %o4
+
+	/* Table entries that are small integers (<= NR_SYSCALLS, see
+	 * the dispatch below) name a native 32-bit Linux syscall:
+	 * replace %l3 with that entry from sys_call_table32.
+	 */
+linux_syscall_for_solaris:
+	sethi		%hi(sys_call_table32), %l6
+	or		%l6, %lo(sys_call_table32), %l6
+	sll		%l3, 2, %l4
+	ba,pt		%xcc, 10f
+	 lduw		[%l6 + %l4], %l3
+
+	/* Solaris system calls enter here... */
+	.align	32
+	.globl	solaris_sparc_syscall, entry64_personality_patch
+solaris_sparc_syscall:
+	/* NOTE(review): the 0x0 offset below looks like a placeholder
+	 * that is fixed up at the entry64_personality_patch site --
+	 * verify where the patching happens.  %l0 ends up holding the
+	 * task's personality byte.
+	 */
+entry64_personality_patch:
+	ldub		[%g4 + 0x0], %l0
+	cmp		%g1, 255
+	bg,pn		%icc, solaris_unimplemented
+	 srl		%g1, 0, %g1
+	sethi		%hi(solaris_sys_table), %l7
+	or		%l7, %lo(solaris_sys_table), %l7
+	brz,pn		%g1, solaris_sucks	! number 0: indirect syscall
+	 mov		%i4, %o4
+	sll		%g1, 2, %l4
+	cmp		%l0, 1
+	bne,pn		%icc, solaris_reg	! personality not set up yet
+1:	 srl		%i0, 0, %o0
+	lduw		[%l7 + %l4], %l3
+	srl		%i1, 0, %o1
+	ldx		[%g6 + TI_FLAGS], %l5
+	cmp		%l3, NR_SYSCALLS
+	bleu,a,pn	%xcc, linux_syscall_for_solaris
+	 nop
+	/* Bit 0 of the table entry requests a pt_regs pointer as the
+	 * first argument (the ,a branch annuls the add otherwise).
+	 */
+	andcc		%l3, 1, %g0
+	bne,a,pn	%icc, 10f
+	 add		%sp, PTREGS_OFF, %o0
+10:	srl		%i2, 0, %o2
+	mov		%i5, %o5
+	andn		%l3, 3, %l7	! strip flag bits to get the address
+	andcc		%l5, _TIF_SYSCALL_TRACE, %g0
+	bne,pn		%icc, solaris_syscall_trace
+	 mov		%i0, %l5
+2:	call		%l7
+	 srl		%i3, 0, %o3
+ret_from_solaris:
+	/* Common return path: store the result into pt_regs, clear the
+	 * carry bits on success or map a -errno result (values in
+	 * [-ERESTART_RESTARTBLOCK, -1]) to a positive Solaris errno
+	 * with carry set, then advance the user PC -- except when tnpc
+	 * has its low bit set (the setcontext case, see label 2).
+	 */
+	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+	ldx		[%g6 + TI_FLAGS], %l6
+	sra		%o0, 0, %o0
+	mov		%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+	cmp		%o0, -ERESTART_RESTARTBLOCK
+	sllx		%g2, 32, %g2
+	bgeu,pn		%xcc, 1f
+	 andcc		%l6, _TIF_SYSCALL_TRACE, %l6
+
+	/* System call success, clear Carry condition code. */
+	andn		%g3, %g2, %g3
+	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+	bne,pn		%icc, solaris_syscall_trace2
+	 ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1
+	andcc		%l1, 1, %g0
+	bne,pn		%icc, 2f
+	 clr		%l6
+	add		%l1, 0x4, %l2
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]	 ! pc = npc
+	call		rtrap
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC] !npc = npc+4
+
+	/* When tnpc & 1, this comes from setcontext and we don't want to advance pc */
+2:	andn		%l1, 3, %l1
+	call		rtrap
+	 stx		%l1, [%sp + PTREGS_OFF + PT_V9_TNPC] !npc = npc&~3
+
+1:
+	/* System call failure, set Carry condition code.
+	 * Also, get abs(errno) to return to the process.
+	 */
+	sub		%g0, %o0, %o0
+	or		%g3, %g2, %g3
+	cmp		%o0, ERANGE	/* 0-ERANGE are identity mapped */
+	bleu,pt		%icc, 1f
+	 cmp		%o0, EMEDIUMTYPE
+	bgu,pn		%icc, 1f
+	 sethi		%hi(solaris_err_table), %l6
+	/* errnos in (ERANGE, EMEDIUMTYPE] are translated via the table */
+	sll		%o0, 2, %o0
+	or		%l6, %lo(solaris_err_table), %l6
+	ldsw		[%l6 + %o0], %o0
+1:	stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+	mov		1, %l6
+	stx		%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+	bne,pn		%icc, solaris_syscall_trace2
+	 ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %l1
+	andcc		%l1, 1, %g0
+	bne,pn		%icc, 2b
+	 add		%l1, 0x4, %l2
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]  ! pc = npc
+	call		rtrap
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC] !npc = npc+4
+
+	/* Exit-side tracing hook; same tnpc-low-bit handling as above. */
+solaris_syscall_trace2:
+	call		syscall_trace
+	 add		%l1, 0x4, %l2			/* npc = npc+4 */
+	andcc		%l1, 1, %g0
+	bne,pn		%icc, 2b
+	 nop
+	stx		%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+	call		rtrap
+	 stx		%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+	/* This one is tricky, so that's why we do it in assembly */
+	.globl		solaris_sigsuspend
+solaris_sigsuspend:
+	call		do_sol_sigsuspend
+	 nop
+	/* A negative cooked mask means do_sol_sigsuspend failed and
+	 * the value is already an error; otherwise hand it to
+	 * sys_sigsuspend (the delay-slot store also saves it as %i0).
+	 */
+	brlz,pn		%o0, ret_from_solaris
+	 nop
+	call		sys_sigsuspend
+	 stx		%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+
+	/* Solaris getpid() also yields the parent pid in the second
+	 * return register: sys_getppid's %o0 is stored into the saved
+	 * %i1 slot in the delay slot before sys_getpid runs.
+	 */
+	.globl		solaris_getpid
+solaris_getpid:
+	call		sys_getppid
+	 nop
+	call		sys_getpid
+	 stx		%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt		%xcc, ret_from_solaris
+	 nop
+
+	/* NOTE(review): unlike getpid above, these store %o1 (not %o0)
+	 * after the first call -- presumably the sparc geteuid/getegid
+	 * entry points return a second value in %o1; verify.
+	 */
+	.globl	solaris_getuid
+solaris_getuid:
+	call		sys_geteuid
+	 nop
+	call		sys_getuid
+	 stx		%o1, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt		%xcc, ret_from_solaris
+	 nop
+
+	.globl	solaris_getgid
+solaris_getgid:
+	call		sys_getegid
+	 nop
+	call		sys_getgid
+	 stx		%o1, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt		%xcc, ret_from_solaris
+	 nop
+
+	/* Unknown/out-of-range syscall number: report and return. */
+	.globl		solaris_unimplemented
+solaris_unimplemented:
+	call		do_sol_unimplemented
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, ret_from_solaris
+	 nop
+
+	.section	__ex_table,#alloc
+	.align		4
+	.word		exen, exenf
+
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
new file mode 100644
index 0000000..d7c99fa
--- /dev/null
+++ b/arch/sparc64/solaris/fs.c
@@ -0,0 +1,739 @@
+/* $Id: fs.c,v 1.27 2002/02/08 03:57:14 davem Exp $
+ * fs.c: fs related syscall emulation for Solaris
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * 1999-08-19 Implemented solaris F_FREESP (truncate)
+ *            fcntl, by Jason Rappleye (rappleye@ccr.buffalo.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/smp_lock.h>
+#include <linux/limits.h>
+#include <linux/resource.h>
+#include <linux/quotaops.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/ptrace.h>
+
+#include "conv.h"
+
+#define R3_VERSION	1
+#define R4_VERSION	2
+
+typedef struct {
+	s32	tv_sec;
+	s32	tv_nsec;
+} timestruct_t;
+
+struct sol_stat {
+	u32		st_dev;
+	s32		st_pad1[3];     /* network id */
+	u32		st_ino;
+	u32		st_mode;
+	u32		st_nlink;
+	u32		st_uid;
+	u32		st_gid;
+	u32		st_rdev;
+	s32		st_pad2[2];
+	s32		st_size;
+	s32		st_pad3;	/* st_size, off_t expansion */
+	timestruct_t	st_atime;
+	timestruct_t	st_mtime;
+	timestruct_t	st_ctime;
+	s32		st_blksize;
+	s32		st_blocks;
+	char		st_fstype[16];
+	s32		st_pad4[8];     /* expansion area */
+};
+
+struct sol_stat64 {
+	u32		st_dev;
+	s32		st_pad1[3];     /* network id */
+	u64		st_ino;
+	u32		st_mode;
+	u32		st_nlink;
+	u32		st_uid;
+	u32		st_gid;
+	u32		st_rdev;
+	s32		st_pad2[2];
+	s64		st_size;
+	timestruct_t	st_atime;
+	timestruct_t	st_mtime;
+	timestruct_t	st_ctime;
+	s64		st_blksize;
+	s32		st_blocks;
+	char		st_fstype[16];
+	s32		st_pad4[4];     /* expansion area */
+};
+
+/* "ufs" packed into the high bytes of a word.  The original used the
+ * logical '||' operator, which collapses the whole expression to 1 --
+ * clearly a typo for bitwise '|'.
+ */
+#define UFSMAGIC (((unsigned)'u'<<24)|((unsigned)'f'<<16)|((unsigned)'s'<<8))
+
+/* Copy a kernel 'struct kstat' out to a Solaris 32-bit 'struct stat'.
+ * Returns -EOVERFLOW when the size or the sysv-encoded device numbers
+ * do not fit the 32-bit layout, -EFAULT on a bad user buffer.  Only
+ * the first word of the 16-byte st_fstype field is filled in.
+ */
+static inline int putstat(struct sol_stat __user *ubuf, struct kstat *kbuf)
+{
+	if (kbuf->size > MAX_NON_LFS ||
+	    !sysv_valid_dev(kbuf->dev) ||
+	    !sysv_valid_dev(kbuf->rdev))
+		return -EOVERFLOW;
+	if (put_user (sysv_encode_dev(kbuf->dev), &ubuf->st_dev)	||
+	    __put_user (kbuf->ino, &ubuf->st_ino)		||
+	    __put_user (kbuf->mode, &ubuf->st_mode)		||
+	    __put_user (kbuf->nlink, &ubuf->st_nlink)	||
+	    __put_user (kbuf->uid, &ubuf->st_uid)		||
+	    __put_user (kbuf->gid, &ubuf->st_gid)		||
+	    __put_user (sysv_encode_dev(kbuf->rdev), &ubuf->st_rdev)	||
+	    __put_user (kbuf->size, &ubuf->st_size)		||
+	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime.tv_sec)	||
+	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime.tv_nsec)	||
+	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime.tv_sec)	||
+	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime.tv_nsec)	||
+	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime.tv_sec)	||
+	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec)	||
+	    __put_user (kbuf->blksize, &ubuf->st_blksize)	||
+	    __put_user (kbuf->blocks, &ubuf->st_blocks)	||
+	    __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+		return -EFAULT;
+	return 0;
+}
+
+/* Like putstat(), but for the Solaris 'struct stat64' layout: the
+ * inode, size and blksize fields are 64-bit, so only the device
+ * numbers can overflow.
+ */
+static inline int putstat64(struct sol_stat64 __user *ubuf, struct kstat *kbuf)
+{
+	if (!sysv_valid_dev(kbuf->dev) || !sysv_valid_dev(kbuf->rdev))
+		return -EOVERFLOW;
+	if (put_user (sysv_encode_dev(kbuf->dev), &ubuf->st_dev)	||
+	    __put_user (kbuf->ino, &ubuf->st_ino)		||
+	    __put_user (kbuf->mode, &ubuf->st_mode)		||
+	    __put_user (kbuf->nlink, &ubuf->st_nlink)	||
+	    __put_user (kbuf->uid, &ubuf->st_uid)		||
+	    __put_user (kbuf->gid, &ubuf->st_gid)		||
+	    __put_user (sysv_encode_dev(kbuf->rdev), &ubuf->st_rdev)	||
+	    __put_user (kbuf->size, &ubuf->st_size)		||
+	    __put_user (kbuf->atime.tv_sec, &ubuf->st_atime.tv_sec)	||
+	    __put_user (kbuf->atime.tv_nsec, &ubuf->st_atime.tv_nsec)	||
+	    __put_user (kbuf->mtime.tv_sec, &ubuf->st_mtime.tv_sec)	||
+	    __put_user (kbuf->mtime.tv_nsec, &ubuf->st_mtime.tv_nsec)	||
+	    __put_user (kbuf->ctime.tv_sec, &ubuf->st_ctime.tv_sec)	||
+	    __put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec)	||
+	    __put_user (kbuf->blksize, &ubuf->st_blksize)	||
+	    __put_user (kbuf->blocks, &ubuf->st_blocks)	||
+	    __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
+		return -EFAULT;
+	return 0;
+}
+
+/* Solaris stat(2): stat the path, then convert the result into the
+ * Solaris 32-bit stat layout at 'statbuf'.
+ */
+asmlinkage int solaris_stat(u32 filename, u32 statbuf)
+{
+	struct kstat ks;
+	int err;
+
+	err = vfs_stat(A(filename), &ks);
+	return err ? err : putstat(A(statbuf), &ks);
+}
+
+asmlinkage int solaris_xstat(int vers, u32 filename, u32 statbuf)
+{
+	/* Solaris doesn't bother with looking at vers, so we do neither */
+	return solaris_stat(filename, statbuf);
+}
+
+/* stat64 variant: same lookup, 64-bit user structure. */
+asmlinkage int solaris_stat64(u32 filename, u32 statbuf)
+{
+	struct kstat s;
+	int ret = vfs_stat(A(filename), &s);
+	if (!ret)
+		return putstat64(A(statbuf), &s);
+	return ret;
+}
+
+/* lstat: like stat but does not follow a final symlink. */
+asmlinkage int solaris_lstat(u32 filename, u32 statbuf)
+{
+	struct kstat s;
+	int ret = vfs_lstat(A(filename), &s);
+	if (!ret)
+		return putstat(A(statbuf), &s);
+	return ret;
+}
+
+/* 'vers' is ignored, as in solaris_xstat(). */
+asmlinkage int solaris_lxstat(int vers, u32 filename, u32 statbuf)
+{
+	return solaris_lstat(filename, statbuf);
+}
+
+asmlinkage int solaris_lstat64(u32 filename, u32 statbuf)
+{
+	struct kstat s;
+	int ret = vfs_lstat(A(filename), &s);
+	if (!ret)
+		return putstat64(A(statbuf), &s);
+	return ret;
+}
+
+/* fstat: stat by open file descriptor. */
+asmlinkage int solaris_fstat(unsigned int fd, u32 statbuf)
+{
+	struct kstat s;
+	int ret = vfs_fstat(fd, &s);
+	if (!ret)
+		return putstat(A(statbuf), &s);
+	return ret;
+}
+
+asmlinkage int solaris_fxstat(int vers, u32 fd, u32 statbuf)
+{
+	return solaris_fstat(fd, statbuf);
+}
+
+asmlinkage int solaris_fstat64(unsigned int fd, u32 statbuf)
+{
+	struct kstat s;
+	int ret = vfs_fstat(fd, &s);
+	if (!ret)
+		return putstat64(A(statbuf), &s);
+	return ret;
+}
+
+/* mknod(2): translate the sysv-encoded 'dev' into the Linux encoding
+ * before calling the native syscall.
+ */
+asmlinkage int solaris_mknod(u32 path, u32 mode, s32 dev)
+{
+	int (*sys_mknod)(const char __user *,int,unsigned) = 
+		(int (*)(const char __user *,int,unsigned))SYS(mknod);
+	int major = sysv_major(dev);
+	int minor = sysv_minor(dev);
+
+	/* minor is guaranteed to be OK for MKDEV, major might be not */
+	if (major > 0xfff)
+		return -EINVAL;
+	return sys_mknod(A(path), mode, new_encode_dev(MKDEV(major,minor)));
+}
+
+/* 'vers' is ignored, as in the *xstat wrappers. */
+asmlinkage int solaris_xmknod(int vers, u32 path, u32 mode, s32 dev)
+{
+	return solaris_mknod(path, mode, dev);
+}
+
+/* Solaris getdents64 is a straight pass-through to Linux getdents. */
+asmlinkage int solaris_getdents64(unsigned int fd, void __user *dirent, unsigned int count)
+{
+	int (*sys_getdents)(unsigned int, void __user *, unsigned int) =
+		(int (*)(unsigned int, void __user *, unsigned int))SYS(getdents);
+		
+	return sys_getdents(fd, dirent, count);
+}
+
+/* This statfs thingie probably will go in the near future, but... */
+
+struct sol_statfs {
+	short	f_type;
+	s32	f_bsize;
+	s32	f_frsize;
+	s32	f_blocks;
+	s32	f_bfree;
+	u32	f_files;
+	u32	f_ffree;
+	char	f_fname[6];
+	char	f_fpack[6];
+};
+
+/* Solaris statfs: for fstype == 0 call the real sys_statfs through a
+ * kernel-space buffer (hence the set_fs dance) and translate; for a
+ * nonzero fstype the filesystem is not mounted, so fabricated values
+ * are returned (see the comment below).
+ */
+asmlinkage int solaris_statfs(u32 path, u32 buf, int len, int fstype)
+{
+	int ret;
+	struct statfs s;
+	mm_segment_t old_fs = get_fs();
+	int (*sys_statfs)(const char __user *,struct statfs __user *) = 
+		(int (*)(const char __user *,struct statfs __user *))SYS(statfs);
+	struct sol_statfs __user *ss = A(buf);
+	
+	if (len != sizeof(struct sol_statfs)) return -EINVAL;
+	if (!fstype) {
+		/* FIXME: mixing userland and kernel pointers */
+		set_fs (KERNEL_DS);
+		ret = sys_statfs(A(path), &s);
+		set_fs (old_fs);
+		if (!ret) {
+			if (put_user (s.f_type, &ss->f_type)		||
+			    __put_user (s.f_bsize, &ss->f_bsize)	||
+			    __put_user (0, &ss->f_frsize)		||
+			    __put_user (s.f_blocks, &ss->f_blocks)	||
+			    __put_user (s.f_bfree, &ss->f_bfree)	||
+			    __put_user (s.f_files, &ss->f_files)	||
+			    __put_user (s.f_ffree, &ss->f_ffree)	||
+			    __clear_user (&ss->f_fname, 12))
+				return -EFAULT;
+		}
+		return ret;
+	}
+/* Linux can't stat unmounted filesystems so we
+ * simply lie and claim 100MB of 1GB is free. Sorry.
+ */
+	if (put_user (fstype, &ss->f_type)		||
+	    __put_user (1024, &ss->f_bsize)		||
+	    __put_user (0, &ss->f_frsize)		||
+	    __put_user (1024*1024, &ss->f_blocks)	||
+	    __put_user (100*1024, &ss->f_bfree)		||
+	    __put_user (60000, &ss->f_files)		||
+	    __put_user (50000, &ss->f_ffree)		||
+	    __clear_user (&ss->f_fname, 12))
+		return -EFAULT;
+	return 0;
+}
+
+/* fstatfs: same translation as solaris_statfs but by descriptor; a
+ * nonzero fstype is delegated to solaris_statfs (which ignores the
+ * path in that case).
+ */
+asmlinkage int solaris_fstatfs(u32 fd, u32 buf, int len, int fstype)
+{
+	int ret;
+	struct statfs s;
+	mm_segment_t old_fs = get_fs();
+	int (*sys_fstatfs)(unsigned,struct statfs __user *) = 
+		(int (*)(unsigned,struct statfs __user *))SYS(fstatfs);
+	struct sol_statfs __user *ss = A(buf);
+	
+	if (len != sizeof(struct sol_statfs)) return -EINVAL;
+	if (!fstype) {
+		set_fs (KERNEL_DS);
+		ret = sys_fstatfs(fd, &s);
+		set_fs (old_fs);
+		if (!ret) {
+			if (put_user (s.f_type, &ss->f_type)		||
+			    __put_user (s.f_bsize, &ss->f_bsize)	||
+			    __put_user (0, &ss->f_frsize)		||
+			    __put_user (s.f_blocks, &ss->f_blocks)	||
+			    __put_user (s.f_bfree, &ss->f_bfree)	||
+			    __put_user (s.f_files, &ss->f_files)	||
+			    __put_user (s.f_ffree, &ss->f_ffree)	||
+			    __clear_user (&ss->f_fname, 12))
+				return -EFAULT;
+		}
+		return ret;
+	}
+	/* Otherwise fstatfs is the same as statfs */
+	return solaris_statfs(0, buf, len, fstype);
+}
+
+struct sol_statvfs {
+	u32	f_bsize;
+	u32	f_frsize;
+	u32	f_blocks;
+	u32	f_bfree;
+	u32	f_bavail;
+	u32	f_files;
+	u32	f_ffree;
+	u32	f_favail;
+	u32	f_fsid;
+	char	f_basetype[16];
+	u32	f_flag;
+	u32	f_namemax;
+	char	f_fstr[32];
+	u32	f_filler[16];
+};
+
+struct sol_statvfs64 {
+	u32	f_bsize;
+	u32	f_frsize;
+	u64	f_blocks;
+	u64	f_bfree;
+	u64	f_bavail;
+	u64	f_files;
+	u64	f_ffree;
+	u64	f_favail;
+	u32	f_fsid;
+	char	f_basetype[16];
+	u32	f_flag;
+	u32	f_namemax;
+	char	f_fstr[32];
+	u32	f_filler[16];
+};
+
+/* Fill a Solaris 'statvfs' structure for the filesystem behind 'mnt'.
+ * 'i' accumulates the Solaris flag bits (1 = read-only, 2 = nosuid);
+ * f_basetype gets the Linux filesystem type name truncated to 15
+ * characters plus a NUL terminator.
+ */
+static int report_statvfs(struct vfsmount *mnt, struct inode *inode, u32 buf)
+{
+	struct kstatfs s;
+	int error;
+	struct sol_statvfs __user *ss = A(buf);
+
+	error = vfs_statfs(mnt->mnt_sb, &s);
+	if (!error) {
+		const char *p = mnt->mnt_sb->s_type->name;
+		int i = 0;
+		int j = strlen (p);
+		
+		if (j > 15) j = 15;
+		if (IS_RDONLY(inode)) i = 1;
+		if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
+		if (!sysv_valid_dev(inode->i_sb->s_dev))
+			return -EOVERFLOW;
+		if (put_user (s.f_bsize, &ss->f_bsize)		||
+		    __put_user (0, &ss->f_frsize)		||
+		    __put_user (s.f_blocks, &ss->f_blocks)	||
+		    __put_user (s.f_bfree, &ss->f_bfree)	||
+		    __put_user (s.f_bavail, &ss->f_bavail)	||
+		    __put_user (s.f_files, &ss->f_files)	||
+		    __put_user (s.f_ffree, &ss->f_ffree)	||
+		    __put_user (s.f_ffree, &ss->f_favail)	||
+		    __put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
+		    __copy_to_user (ss->f_basetype,p,j)		||
+		    __put_user (0, (char __user *)&ss->f_basetype[j])	||
+		    __put_user (s.f_namelen, &ss->f_namemax)	||
+		    __put_user (i, &ss->f_flag)			||
+		    __clear_user (&ss->f_fstr, 32))
+			return -EFAULT;
+	}
+	return error;
+}
+
+/* 64-bit variant of report_statvfs(); identical logic, wider block
+ * and file counters in the user structure.
+ */
+static int report_statvfs64(struct vfsmount *mnt, struct inode *inode, u32 buf)
+{
+	struct kstatfs s;
+	int error;
+	struct sol_statvfs64 __user *ss = A(buf);
+			
+	error = vfs_statfs(mnt->mnt_sb, &s);
+	if (!error) {
+		const char *p = mnt->mnt_sb->s_type->name;
+		int i = 0;
+		int j = strlen (p);
+		
+		if (j > 15) j = 15;
+		if (IS_RDONLY(inode)) i = 1;
+		if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
+		if (!sysv_valid_dev(inode->i_sb->s_dev))
+			return -EOVERFLOW;
+		if (put_user (s.f_bsize, &ss->f_bsize)		||
+		    __put_user (0, &ss->f_frsize)		||
+		    __put_user (s.f_blocks, &ss->f_blocks)	||
+		    __put_user (s.f_bfree, &ss->f_bfree)	||
+		    __put_user (s.f_bavail, &ss->f_bavail)	||
+		    __put_user (s.f_files, &ss->f_files)	||
+		    __put_user (s.f_ffree, &ss->f_ffree)	||
+		    __put_user (s.f_ffree, &ss->f_favail)	||
+		    __put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
+		    __copy_to_user (ss->f_basetype,p,j)		||
+		    __put_user (0, (char __user *)&ss->f_basetype[j])	||
+		    __put_user (s.f_namelen, &ss->f_namemax)	||
+		    __put_user (i, &ss->f_flag)			||
+		    __clear_user (&ss->f_fstr, 32))
+			return -EFAULT;
+	}
+	return error;
+}
+
+/* statvfs by path: resolve the path and delegate to report_statvfs. */
+asmlinkage int solaris_statvfs(u32 path, u32 buf)
+{
+	struct nameidata nd;
+	int error;
+
+	error = user_path_walk(A(path),&nd);
+	if (!error) {
+		struct inode * inode = nd.dentry->d_inode;
+		error = report_statvfs(nd.mnt, inode, buf);
+		path_release(&nd);
+	}
+	return error;
+}
+
+/* statvfs by descriptor. */
+asmlinkage int solaris_fstatvfs(unsigned int fd, u32 buf)
+{
+	struct file * file;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (file) {
+		error = report_statvfs(file->f_vfsmnt, file->f_dentry->d_inode, buf);
+		fput(file);
+	}
+
+	return error;
+}
+
+/* NOTE(review): the 64-bit variants take the big kernel lock while
+ * the 32-bit ones above do not -- verify whether the asymmetry is
+ * intentional.
+ */
+asmlinkage int solaris_statvfs64(u32 path, u32 buf)
+{
+	struct nameidata nd;
+	int error;
+
+	lock_kernel();
+	error = user_path_walk(A(path), &nd);
+	if (!error) {
+		struct inode * inode = nd.dentry->d_inode;
+		error = report_statvfs64(nd.mnt, inode, buf);
+		path_release(&nd);
+	}
+	unlock_kernel();
+	return error;
+}
+
+asmlinkage int solaris_fstatvfs64(unsigned int fd, u32 buf)
+{
+	struct file * file;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (file) {
+		lock_kernel();
+		error = report_statvfs64(file->f_vfsmnt, file->f_dentry->d_inode, buf);
+		unlock_kernel();
+		fput(file);
+	}
+	return error;
+}
+
+extern asmlinkage long sparc32_open(const char * filename, int flags, int mode);
+
+/* open(2): translate Solaris open flags to Linux ones, then call the
+ * sparc32 open path.  The plain cast of 'fname' relies on the u32 ->
+ * long conversion zero-extending, i.e. it is equivalent to A(fname).
+ */
+asmlinkage int solaris_open(u32 fname, int flags, u32 mode)
+{
+	const char *filename = (const char *)(long)fname;
+	int fl = flags & 0xf;	/* access-mode bits pass through */
+
+	/* Translate flags first. */
+	if (flags & 0x2000) fl |= O_LARGEFILE;
+	if (flags & 0x8050) fl |= O_SYNC;	/* Solaris sync-write family */
+	if (flags & 0x80) fl |= O_NONBLOCK;
+	if (flags & 0x100) fl |= O_CREAT;
+	if (flags & 0x200) fl |= O_TRUNC;
+	if (flags & 0x400) fl |= O_EXCL;
+	if (flags & 0x800) fl |= O_NOCTTY;
+	flags = fl;
+
+	return sparc32_open(filename, flags, mode);
+}
+
+#define SOL_F_SETLK	6
+#define SOL_F_SETLKW	7
+#define SOL_F_FREESP    11
+#define SOL_F_ISSTREAM  13
+#define SOL_F_GETLK     14
+#define SOL_F_PRIV      15
+#define SOL_F_NPRIV     16
+#define SOL_F_QUOTACTL  17
+#define SOL_F_BLOCKS    18
+#define SOL_F_BLKSIZE   19
+#define SOL_F_GETOWN    23
+#define SOL_F_SETOWN    24
+
+struct sol_flock {
+	short	l_type;
+	short	l_whence;
+	u32	l_start;
+	u32	l_len;
+	s32	l_sysid;
+	s32	l_pid;
+	s32	l_pad[4];
+};
+
+/* fcntl(2) emulation: pass simple commands straight through, translate
+ * file status flags between the Solaris and Linux encodings, convert
+ * record locks via a kernel-space 'struct flock', and implement
+ * SOL_F_FREESP as ftruncate.
+ */
+asmlinkage int solaris_fcntl(unsigned fd, unsigned cmd, u32 arg)
+{
+	int (*sys_fcntl)(unsigned,unsigned,unsigned long) = 
+		(int (*)(unsigned,unsigned,unsigned long))SYS(fcntl);
+	int ret, flags;
+
+	switch (cmd) {
+	case F_DUPFD:
+	case F_GETFD:
+	case F_SETFD: return sys_fcntl(fd, cmd, (unsigned long)arg);
+	case F_GETFL:
+		flags = sys_fcntl(fd, cmd, 0);
+		/* Propagate a -errno result instead of mangling it
+		 * through the flag translation below.
+		 */
+		if (flags < 0)
+			return flags;
+		ret = flags & 0xf;
+		if (flags & O_SYNC) ret |= 0x8050;
+		if (flags & O_NONBLOCK) ret |= 0x80;
+		return ret;
+	case F_SETFL:
+		flags = arg & 0xf;
+		if (arg & 0x8050) flags |= O_SYNC;
+		if (arg & 0x80) flags |= O_NONBLOCK;
+		return sys_fcntl(fd, cmd, (long)flags);
+	case SOL_F_GETLK:
+	case SOL_F_SETLK:
+	case SOL_F_SETLKW:
+		{
+			struct flock f;
+			struct sol_flock __user *p = A(arg);
+			mm_segment_t old_fs = get_fs();
+
+			switch (cmd) {
+			case SOL_F_GETLK: cmd = F_GETLK; break;
+			case SOL_F_SETLK: cmd = F_SETLK; break;
+			case SOL_F_SETLKW: cmd = F_SETLKW; break;
+			}
+
+			/* NOTE(review): f.l_pid is read from l_sysid,
+			 * not l_pid -- looks suspicious; verify against
+			 * the Solaris flock layout.
+			 */
+			if (get_user (f.l_type, &p->l_type) ||
+			    __get_user (f.l_whence, &p->l_whence) ||
+			    __get_user (f.l_start, &p->l_start) ||
+			    __get_user (f.l_len, &p->l_len) ||
+			    __get_user (f.l_pid, &p->l_sysid))
+				return -EFAULT;
+
+			set_fs(KERNEL_DS);
+			ret = sys_fcntl(fd, cmd, (unsigned long)&f);
+			set_fs(old_fs);
+
+			if (__put_user (f.l_type, &p->l_type) ||
+			    __put_user (f.l_whence, &p->l_whence) ||
+			    __put_user (f.l_start, &p->l_start) ||
+			    __put_user (f.l_len, &p->l_len) ||
+			    __put_user (f.l_pid, &p->l_pid) ||
+			    __put_user (0, &p->l_sysid))
+				return -EFAULT;
+
+			return ret;
+		}
+	case SOL_F_FREESP:
+	        { 
+		    int length;
+		    int (*sys_newftruncate)(unsigned int, unsigned long)=
+			    (int (*)(unsigned int, unsigned long))SYS(ftruncate);
+
+		    if (get_user(length, &((struct sol_flock __user *)A(arg))->l_start))
+			    return -EFAULT;
+
+		    return sys_newftruncate(fd, length);
+		}
+	};
+	return -EINVAL;
+}
+
+/* ulimit(2): only the file-size limit (in 512-byte chunks), the data
+ * segment limit and the descriptor limit are supported.
+ */
+asmlinkage int solaris_ulimit(int cmd, int val)
+{
+	switch (cmd) {
+	case 1: /* UL_GETFSIZE - in 512B chunks */
+		return current->signal->rlim[RLIMIT_FSIZE].rlim_cur >> 9;
+	case 2: /* UL_SETFSIZE */
+		if ((unsigned long)val > (LONG_MAX>>9)) return -ERANGE;
+		val <<= 9;
+		task_lock(current->group_leader);
+		/* Raising the hard limit needs CAP_SYS_RESOURCE. */
+		if (val > current->signal->rlim[RLIMIT_FSIZE].rlim_max) {
+			if (!capable(CAP_SYS_RESOURCE)) {
+				task_unlock(current->group_leader);
+				return -EPERM;
+			}
+			current->signal->rlim[RLIMIT_FSIZE].rlim_max = val;
+		}
+		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = val;
+		task_unlock(current->group_leader);
+		return 0;
+	case 3: /* UL_GMEMLIM */
+		return current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	case 4: /* UL_GDESLIM */
+		return NR_OPEN;
+	}
+	return -EINVAL;
+}
+
+/* At least at the time I'm writing this, Linux doesn't have ACLs, so we
+   just fake this */
+asmlinkage int solaris_acl(u32 filename, int cmd, int nentries, u32 aclbufp)
+{
+	return -ENOSYS;
+}
+
+/* Same stub for the descriptor-based variant. */
+asmlinkage int solaris_facl(unsigned int fd, int cmd, int nentries, u32 aclbufp)
+{
+	return -ENOSYS;
+}
+
+/* pread/pwrite: 32-bit offset variants implemented on top of the
+ * native 64-bit syscalls.
+ */
+asmlinkage int solaris_pread(unsigned int fd, char __user *buf, u32 count, u32 pos)
+{
+	ssize_t (*sys_pread64)(unsigned int, char __user *, size_t, loff_t) =
+		(ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pread64);
+
+	return sys_pread64(fd, buf, count, (loff_t)pos);
+}
+
+asmlinkage int solaris_pwrite(unsigned int fd, char __user *buf, u32 count, u32 pos)
+{
+	ssize_t (*sys_pwrite64)(unsigned int, char __user *, size_t, loff_t) =
+		(ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pwrite64);
+
+	return sys_pwrite64(fd, buf, count, (loff_t)pos);
+}
+
+/* POSIX.1 names */
+#define _PC_LINK_MAX    1
+#define _PC_MAX_CANON   2
+#define _PC_MAX_INPUT   3
+#define _PC_NAME_MAX    4
+#define _PC_PATH_MAX    5
+#define _PC_PIPE_BUF    6
+#define _PC_NO_TRUNC    7
+#define _PC_VDISABLE    8
+#define _PC_CHOWN_RESTRICTED    9
+/* POSIX.4 names */
+#define _PC_ASYNC_IO    10
+#define _PC_PRIO_IO     11
+#define _PC_SYNC_IO     12
+#define _PC_LAST        12
+
+/* This is not a real and complete implementation yet, just to keep
+ * the easy Solaris binaries happy.  The fd is never consulted; every
+ * answer is a compile-time constant.
+ */
+asmlinkage int solaris_fpathconf(int fd, int name)
+{
+	switch (name) {
+	case _PC_LINK_MAX:
+		return LINK_MAX;
+	case _PC_MAX_CANON:
+		return MAX_CANON;
+	case _PC_MAX_INPUT:
+		return MAX_INPUT;
+	case _PC_NAME_MAX:
+		return NAME_MAX;
+	case _PC_PATH_MAX:
+		return PATH_MAX;
+	case _PC_PIPE_BUF:
+		return PIPE_BUF;
+	case _PC_CHOWN_RESTRICTED:
+		return 1;
+	case _PC_NO_TRUNC:
+	case _PC_VDISABLE:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* pathconf(2): the path is not needed because solaris_fpathconf()
+ * ignores its fd -- all answers are filesystem independent here.
+ */
+asmlinkage int solaris_pathconf(u32 path, int name)
+{
+	return solaris_fpathconf(0, name);
+}
+
+/* solaris_llseek returns long long - quite difficult */
+asmlinkage long solaris_llseek(struct pt_regs *regs, u32 off_hi, u32 off_lo, int whence)
+{
+	int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int) =
+		(int (*)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int))SYS(_llseek);
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	loff_t retval;
+	
+	/* The fd comes from the trapped %i0; the 64-bit result is
+	 * split across the two user return registers: low word via
+	 * the saved %i1, high word as the function's return value.
+	 */
+	set_fs(KERNEL_DS);
+	ret = sys_llseek((unsigned int)regs->u_regs[UREG_I0], off_hi, off_lo, &retval, whence);
+	set_fs(old_fs);
+	if (ret < 0) return ret;
+	regs->u_regs[UREG_I1] = (u32)retval;
+	return (retval >> 32);
+}
+
+/* Have to mask out all but lower 3 bits */
+asmlinkage int solaris_access(u32 filename, long mode)
+{
+	int (*sys_access)(const char __user *, int) = 
+		(int (*)(const char __user *, int))SYS(access);
+		
+	/* Only R_OK|W_OK|X_OK are meaningful to the Linux syscall. */
+	return sys_access(A(filename), mode & 7);
+}
diff --git a/arch/sparc64/solaris/ioctl.c b/arch/sparc64/solaris/ioctl.c
new file mode 100644
index 0000000..cac0a1c
--- /dev/null
+++ b/arch/sparc64/solaris/ioctl.c
@@ -0,0 +1,820 @@
+/* $Id: ioctl.c,v 1.17 2002/02/08 03:57:14 davem Exp $
+ * ioctl.c: Solaris ioctl emulation.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
+ *
+ * Streams & timod emulation based on code
+ * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
+ *
+ * 1999-08-19 Implemented solaris 'm' (mag tape) and
+ *            'O' (openprom) ioctls, by Jason Rappleye
+ *             (rappleye@ccr.buffalo.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/mtio.h>
+#include <linux/time.h>
+#include <linux/compat.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+#include <asm/termios.h>
+#include <asm/openpromio.h>
+
+#include "conv.h"
+#include "socksys.h"
+
+extern asmlinkage int compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+	u32 arg);
+asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
+
+extern int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
+			char __user *data_buf, int data_len, int flags);
+extern int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, int __user *ctl_len,
+			char __user *data_buf, int data_maxlen, int __user *data_len, int *flags);
+
+/* termio* stuff {{{ */
+
+/* Solaris struct termios: flag words match Linux in position, but the
+ * control character array is 19 bytes and there is no c_line field. */
+struct solaris_termios {
+	u32	c_iflag;
+	u32	c_oflag;
+	u32	c_cflag;
+	u32	c_lflag;
+	u8	c_cc[19];
+};
+
+/* Solaris struct termio: 16-bit flag words, layout-compatible with the
+ * Linux struct termio apart from the c_cflag baud encoding. */
+struct solaris_termio {
+	u16	c_iflag;
+	u16	c_oflag;
+	u16	c_cflag;
+	u16	c_lflag;
+	s8	c_line;
+	u8	c_cc[8];
+};
+
+/* Solaris struct termiox: hardware flow-control extension (declared
+ * for completeness; not used by the handlers below). */
+struct solaris_termiox {
+	u16	x_hflag;
+	u16	x_cflag;
+	u16	x_rflag[5];
+	u16	x_sflag;
+};
+
+/* Convert a Solaris c_cflag word to the Linux encoding.  Solaris keeps
+ * its extended baud rates (57600 and up) as an index in the low nibble
+ * plus flag bit 0x200000; Linux encodes them as CBAUDEX | Bxxxxx.
+ * NOTE(review): the initial mask clears the low 12 bits before the
+ * baud index is read, which would force "cflag & 0xf" below to 0 --
+ * verify the mask against the Solaris CBAUD bit layout. */
+static u32 solaris_to_linux_cflag(u32 cflag)
+{
+	cflag &= 0x7fdff000;	/* drop bits with no Linux counterpart */
+	if (cflag & 0x200000) {	/* Solaris extended-baud flag */
+		int baud = cflag & 0xf;
+		cflag &= ~0x20000f;
+		switch (baud) {
+		case 0: baud = B57600; break;
+		case 1: baud = B76800; break;
+		case 2: baud = B115200; break;
+		case 3: baud = B153600; break;
+		case 4: baud = B230400; break;
+		case 5: baud = B307200; break;
+		case 6: baud = B460800; break;
+		}
+		cflag |= CBAUDEX | baud;
+	}
+	return cflag;
+}
+
+/* Convert a Linux c_cflag word to the Solaris encoding: CBAUDEX rates
+ * become index 0-8 plus the Solaris extended-baud flag 0x200000.
+ * CMSPAR and the input-baud (CIBAUD) bits have no Solaris counterpart
+ * and are dropped. */
+static u32 linux_to_solaris_cflag(u32 cflag)
+{
+	cflag &= ~(CMSPAR | CIBAUD);
+	if (cflag & CBAUDEX) {
+		int baud = cflag & CBAUD;
+		cflag &= ~CBAUD;
+		switch (baud) {
+		case B57600: baud = 0; break;
+		case B76800: baud = 1; break;
+		case B115200: baud = 2; break;
+		case B153600: baud = 3; break;
+		case B230400: baud = 4; break;
+		case B307200: baud = 5; break;
+		case B460800: baud = 6; break;
+		case B614400: baud = 7; break;
+		case B921600: baud = 8; break;
+#if 0		
+		case B1843200: baud = 9; break;
+#endif
+		}
+		cflag |= 0x200000 | baud;
+	}
+	return cflag;
+}
+
+/* Run a termio-returning Linux ioctl directly on the user's buffer
+ * (the struct layouts are compatible apart from c_cflag), then rewrite
+ * c_cflag in place with the Solaris baud encoding. */
+static inline int linux_to_solaris_termio(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	struct solaris_termio __user *p = A(arg);
+	int ret;
+	
+	ret = sys_ioctl(fd, cmd, (unsigned long)p);
+	if (!ret) {
+		u32 cflag;
+		
+		if (__get_user (cflag, &p->c_cflag))
+			return -EFAULT;
+		cflag = linux_to_solaris_cflag(cflag);
+		if (__put_user (cflag, &p->c_cflag))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+/* Copy the Solaris termio into kernel space, translate c_cflag to the
+ * Linux encoding, then issue the Linux set-ioctl on the kernel copy
+ * under KERNEL_DS. */
+static int solaris_to_linux_termio(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	int ret;
+	struct solaris_termio s;
+	mm_segment_t old_fs = get_fs();
+	
+	if (copy_from_user (&s, (struct solaris_termio __user *)A(arg), sizeof(struct solaris_termio)))
+		return -EFAULT;
+	s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd, cmd, (unsigned long)&s);
+	set_fs(old_fs);
+	return ret;
+}
+
+/* Fetch the Linux termios into a kernel buffer (the ioctl fills `s`),
+ * then copy it out field by field in Solaris layout, translating
+ * c_cflag and zeroing control chars 16-17 that Linux does not supply.
+ * NOTE(review): the Solaris c_cc array is 19 bytes but only 18 are
+ * written here (16 copied + 2 cleared), leaving the last byte of the
+ * user struct untouched -- confirm that is intended. */
+static inline int linux_to_solaris_termios(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	int ret;
+	struct solaris_termios s;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);	
+	ret = sys_ioctl(fd, cmd, (unsigned long)&s);
+	set_fs(old_fs);
+	if (!ret) {
+		struct solaris_termios __user *p = A(arg);
+		if (put_user (s.c_iflag, &p->c_iflag) ||
+		    __put_user (s.c_oflag, &p->c_oflag) ||
+		    __put_user (linux_to_solaris_cflag(s.c_cflag), &p->c_cflag) ||
+		    __put_user (s.c_lflag, &p->c_lflag) ||
+		    __copy_to_user (p->c_cc, s.c_cc, 16) ||
+		    __clear_user (p->c_cc + 16, 2))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+/* Apply a Solaris termios: first read the current Linux settings with
+ * TCGETS so fields Solaris does not carry (c_line, c_cc tail) keep
+ * their values, then overlay the user-supplied Solaris fields, convert
+ * c_cflag, and issue the requested set-ioctl on the kernel copy. */
+static int solaris_to_linux_termios(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	int ret;
+	struct solaris_termios s;
+	struct solaris_termios __user *p = A(arg);
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd, TCGETS, (unsigned long)&s);
+	set_fs(old_fs);
+	if (ret) return ret;
+	if (put_user (s.c_iflag, &p->c_iflag) ||
+	    __put_user (s.c_oflag, &p->c_oflag) ||
+	    __put_user (s.c_cflag, &p->c_cflag) ||
+	    __put_user (s.c_lflag, &p->c_lflag) ||
+	    __copy_from_user (s.c_cc, p->c_cc, 16))
+		return -EFAULT;
+	s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
+	set_fs(KERNEL_DS);
+	ret = sys_ioctl(fd, cmd, (unsigned long)&s);
+	set_fs(old_fs);
+	return ret;
+}
+
+/* Dispatch Solaris 'T' (terminal) ioctls by subcode (cmd & 0xff).
+ * termio/termios transfers go through the conversion helpers above;
+ * commands whose arguments are layout-identical pass straight through
+ * to the Linux ioctl. */
+static inline int solaris_T(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	switch (cmd & 0xff) {
+	case 1: /* TCGETA */
+		return linux_to_solaris_termio(fd, TCGETA, arg);
+	case 2: /* TCSETA */
+		return solaris_to_linux_termio(fd, TCSETA, arg);
+	case 3: /* TCSETAW */
+		return solaris_to_linux_termio(fd, TCSETAW, arg);
+	case 4: /* TCSETAF */
+		return solaris_to_linux_termio(fd, TCSETAF, arg);
+	case 5: /* TCSBRK */
+		return sys_ioctl(fd, TCSBRK, arg);
+	case 6: /* TCXONC */
+		return sys_ioctl(fd, TCXONC, arg);
+	case 7: /* TCFLSH */
+		return sys_ioctl(fd, TCFLSH, arg);
+	case 13: /* TCGETS */
+		return linux_to_solaris_termios(fd, TCGETS, arg);
+	case 14: /* TCSETS */
+		return solaris_to_linux_termios(fd, TCSETS, arg);
+	case 15: /* TCSETSW */
+		return solaris_to_linux_termios(fd, TCSETSW, arg);
+	case 16: /* TCSETSF */
+		return solaris_to_linux_termios(fd, TCSETSF, arg);
+	case 103: /* TIOCSWINSZ */
+		return sys_ioctl(fd, TIOCSWINSZ, arg);
+	case 104: /* TIOCGWINSZ */
+		return sys_ioctl(fd, TIOCGWINSZ, arg);
+	}
+	return -ENOSYS;
+}
+
+/* Solaris 't' ioctls: only the job-control pgrp get/set pair is
+ * implemented; both map 1:1 onto the Linux equivalents. */
+static inline int solaris_t(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	unsigned int minor = cmd & 0xff;
+
+	if (minor == 20)	/* TIOCGPGRP */
+		return sys_ioctl(fd, TIOCGPGRP, arg);
+	if (minor == 21)	/* TIOCSPGRP */
+		return sys_ioctl(fd, TIOCSPGRP, arg);
+	return -ENOSYS;
+}
+
+/* }}} */
+
+/* A pseudo STREAMS support {{{ */
+
+/* User-visible layout of the I_STR ioctl argument: embedded command,
+ * timeout, data length, and a 32-bit pointer to the data. */
+struct strioctl {
+	int cmd, timeout, len;
+	u32 data;
+};
+
+/* Socket parameters as reported/set by SI_SOCKPARAMS. */
+struct solaris_si_sockparams {
+	int sp_family;
+	int sp_type;
+	int sp_protocol;
+};
+
+/* Old-style SI_GETUDATA (O_SI_GETUDATA) reply. */
+struct solaris_o_si_udata {
+	int tidusize;
+	int addrsize;
+	int optsize;
+	int etsdusize;
+	int servtype;
+	int so_state;
+	int so_options;
+	int tsdusize;
+};
+
+/* Current SI_GETUDATA reply: the o_ layout plus embedded sockparams. */
+struct solaris_si_udata {
+	int tidusize;
+	int addrsize;
+	int optsize;
+	int etsdusize;
+	int servtype;
+	int so_state;
+	int so_options;
+	int tsdusize;
+	struct solaris_si_sockparams sockparams;
+};
+
+#define SOLARIS_MODULE_TIMOD    0
+#define SOLARIS_MODULE_SOCKMOD  1
+#define SOLARIS_MODULE_MAX      2
+
+static struct module_info {
+        const char *name;
+        /* can be expanded further if needed */
+} module_table[ SOLARIS_MODULE_MAX + 1 ] = {
+        /* the ordering here must match the module numbers above! */
+        { "timod" },
+        { "sockmod" },
+        { NULL }
+};
+
+/* Handle sockmod ("socket module") I_STR subcommands on a socket fd.
+ * Returns 0 on success or a TLI error code; a Unix errno may ride in
+ * the high byte, as in (EFAULT << 8) | TSYSERR. */
+static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	struct inode *ino;
+	/* I wonder which of these tests are superfluous... --patrik */
+	spin_lock(&current->files->file_lock);
+	if (! current->files->fd[fd] ||
+	    ! current->files->fd[fd]->f_dentry ||
+	    ! (ino = current->files->fd[fd]->f_dentry->d_inode) ||
+	    ! S_ISSOCK(ino->i_mode)) {
+		spin_unlock(&current->files->file_lock);
+		return TBADF;
+	}
+	spin_unlock(&current->files->file_lock);
+	
+	switch (cmd & 0xff) {
+	case 109: /* SI_SOCKPARAMS */
+	{
+		struct solaris_si_sockparams si;
+		/* Parameters are validated for readability only; nothing
+		 * is changed on the socket. */
+		if (copy_from_user (&si, A(arg), sizeof(si)))
+			return (EFAULT << 8) | TSYSERR;
+
+		/* Should we modify socket ino->socket_i.ops and type? */
+		return 0;
+	}
+	case 110: /* SI_GETUDATA */
+	{
+		int etsdusize, servtype;
+		struct solaris_si_udata __user *p = A(arg);
+		switch (SOCKET_I(ino)->type) {
+		case SOCK_STREAM:
+			etsdusize = 1;
+			servtype = 2;
+			break;
+		default:
+			etsdusize = -2;
+			servtype = 3;
+			break;
+		}
+		if (put_user(16384, &p->tidusize) ||
+		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
+		    __put_user(-1, &p->optsize) ||
+		    __put_user(etsdusize, &p->etsdusize) ||
+		    __put_user(servtype, &p->servtype) ||
+		    __put_user(0, &p->so_state) ||
+		    __put_user(0, &p->so_options) ||
+		    __put_user(16384, &p->tsdusize) ||
+		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
+		    __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
+		    /* NOTE(review): sp_protocol is filled with ops->family,
+		     * not the socket protocol -- looks like a copy/paste
+		     * slip; confirm against Solaris semantics. */
+		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
+			return (EFAULT << 8) | TSYSERR;
+		return 0;
+	}
+	case 101: /* O_SI_GETUDATA */
+	{
+		int etsdusize, servtype;
+		struct solaris_o_si_udata __user *p = A(arg);
+		switch (SOCKET_I(ino)->type) {
+		case SOCK_STREAM:
+			etsdusize = 1;
+			servtype = 2;
+			break;
+		default:
+			etsdusize = -2;
+			servtype = 3;
+			break;
+		}
+		if (put_user(16384, &p->tidusize) ||
+		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
+		    __put_user(-1, &p->optsize) ||
+		    __put_user(etsdusize, &p->etsdusize) ||
+		    __put_user(servtype, &p->servtype) ||
+		    __put_user(0, &p->so_state) ||
+		    __put_user(0, &p->so_options) ||
+		    __put_user(16384, &p->tsdusize))
+			return (EFAULT << 8) | TSYSERR;
+		return 0;
+	}
+	case 102: /* SI_SHUTDOWN */
+	case 103: /* SI_LISTEN */
+	case 104: /* SI_SETMYNAME */
+	case 105: /* SI_SETPEERNAME */
+	case 106: /* SI_GETINTRANSIT */
+	case 107: /* SI_TCL_LINK */
+	case 108: /* SI_TCL_UNLINK */
+		;
+	}
+	return TNOTSUPPORT;
+}
+
+/* Handle timod (TLI transport module) I_STR subcommands by replaying
+ * them through the timod putmsg/getmsg emulation.  Error convention:
+ * TLI error code in the low byte, Unix errno shifted into bits 8+.
+ * SOLD() is the local STREAMS debug trace macro. */
+static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
+                                    int len, int __user *len_p)
+{
+	int ret;
+		
+	switch (cmd & 0xff) {
+	case 141: /* TI_OPTMGMT */
+	{
+		int i;
+		u32 prim;
+		SOLD("TI_OPMGMT entry");
+		ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+		SOLD("timod_putmsg() returned");
+		if (ret)
+			return (-ret << 8) | TSYSERR;
+		i = MSG_HIPRI;
+		SOLD("calling timod_getmsg()");
+		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+		SOLD("timod_getmsg() returned");
+		if (ret)
+			return (-ret << 8) | TSYSERR;
+		SOLD("ret ok");
+		/* First word of the reply buffer is the TLI primitive. */
+		if (get_user(prim, (u32 __user *)A(arg)))
+			return (EFAULT << 8) | TSYSERR;
+		SOLD("got prim");
+		if (prim == T_ERROR_ACK) {
+			u32 tmp, tmp2;
+			SOLD("prim is T_ERROR_ACK");
+			/* Words 2 and 3 carry the TLI and Unix errors. */
+			if (get_user(tmp, (u32 __user *)A(arg)+3) ||
+			    get_user(tmp2, (u32 __user *)A(arg)+2))
+				return (EFAULT << 8) | TSYSERR;
+			return (tmp2 << 8) | tmp;
+		}
+		SOLD("TI_OPMGMT return 0");
+		return 0;
+	}
+	case 142: /* TI_BIND */
+	{
+		int i;
+		u32 prim;
+		SOLD("TI_BIND entry");
+		ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
+		SOLD("timod_putmsg() returned");
+		if (ret)
+			return (-ret << 8) | TSYSERR;
+		len = 1024; /* Solaris allows arbitrary return size */
+		i = MSG_HIPRI;
+		SOLD("calling timod_getmsg()");
+		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+		SOLD("timod_getmsg() returned");
+		if (ret)
+			return (-ret << 8) | TSYSERR;
+		SOLD("ret ok");
+		if (get_user(prim, (u32 __user *)A(arg)))
+			return (EFAULT << 8) | TSYSERR;
+		SOLD("got prim");
+		if (prim == T_ERROR_ACK) {
+			u32 tmp, tmp2;
+			SOLD("prim is T_ERROR_ACK");
+			if (get_user(tmp, (u32 __user *)A(arg)+3) ||
+			    get_user(tmp2, (u32 __user *)A(arg)+2))
+				return (EFAULT << 8) | TSYSERR;
+			return (tmp2 << 8) | tmp;
+		}
+		SOLD("no ERROR_ACK requested");
+		if (prim != T_OK_ACK)
+			return TBADSEQ;
+		SOLD("OK_ACK requested");
+		/* Bind needs a second getmsg for the T_BIND_ACK reply. */
+		i = MSG_HIPRI;
+		SOLD("calling timod_getmsg()");
+		ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
+		SOLD("timod_getmsg() returned");
+		if (ret)
+			return (-ret << 8) | TSYSERR;
+		SOLD("TI_BIND return ok");
+		return 0;
+	}
+	case 140: /* TI_GETINFO */
+	case 143: /* TI_UNBIND */
+	case 144: /* TI_GETMYNAME */
+	case 145: /* TI_GETPEERNAME */
+	case 146: /* TI_SETMYNAME */
+	case 147: /* TI_SETPEERNAME */
+		;
+	}
+	return TNOTSUPPORT;
+}
+
+/* Solaris 'S' (STREAMS) ioctls on sockets: maintain a fake "module
+ * stack" in the per-socket sol_socket_struct so that I_PUSH / I_POP /
+ * I_LOOK / I_FIND appear to work, and unpack I_STR to dispatch the
+ * embedded ioctl to the sockmod/timod emulations above. */
+static inline int solaris_S(struct file *filp, unsigned int fd, unsigned int cmd, u32 arg)
+{
+	char *p;
+	int ret;
+	mm_segment_t old_fs;
+	struct strioctl si;
+	struct inode *ino;
+        struct sol_socket_struct *sock;
+        struct module_info *mi;
+
+        ino = filp->f_dentry->d_inode;
+        if (!S_ISSOCK(ino->i_mode))
+		return -EBADF;
+        sock = filp->private_data;
+        if (! sock) {
+                printk("solaris_S: NULL private_data\n");
+                return -EBADF;
+        }
+        if (sock->magic != SOLARIS_SOCKET_MAGIC) {
+                printk("solaris_S: invalid magic\n");
+                return -EBADF;
+        }
+        
+
+	switch (cmd & 0xff) {
+	case 1: /* I_NREAD */
+		return -ENOSYS;
+	case 2: /* I_PUSH */
+        {
+		p = getname (A(arg));
+		if (IS_ERR (p))
+			return PTR_ERR(p);
+                ret = -EINVAL;
+                /* Only names present in module_table may be pushed. */
+                for (mi = module_table; mi->name; mi++) {
+                        if (strcmp(mi->name, p) == 0) {
+                                sol_module m;
+                                if (sock->modcount >= MAX_NR_STREAM_MODULES) {
+                                        ret = -ENXIO;
+                                        break;
+                                }
+                                m = (sol_module) (mi - module_table);
+                                sock->module[sock->modcount++] = m;
+                                ret = 0;
+                                break;
+                        }
+                }
+		putname (p);
+		return ret;
+        }
+	case 3: /* I_POP */
+                if (sock->modcount <= 0) return -EINVAL;
+                sock->modcount--;
+		return 0;
+        case 4: /* I_LOOK */
+        {
+        	const char *p;
+                if (sock->modcount <= 0) return -EINVAL;
+                /* NOTE(review): the top of the stack should be index
+                 * modcount-1; indexing with modcount looks off by one
+                 * -- confirm. */
+                p = module_table[(unsigned)sock->module[sock->modcount]].name;
+                /* NOTE(review): copies strlen(p) bytes without the
+                 * terminating NUL -- confirm callers expect that. */
+                if (copy_to_user (A(arg), p, strlen(p)))
+                	return -EFAULT;
+                return 0;
+        }
+	case 5: /* I_FLUSH */
+		return 0;
+	case 8: /* I_STR */
+		if (copy_from_user(&si, A(arg), sizeof(struct strioctl)))
+			return -EFAULT;
+                /* We ignore what module is actually at the top of stack. */
+		switch ((si.cmd >> 8) & 0xff) {
+		case 'I':
+                        return solaris_sockmod(fd, si.cmd, si.data);
+		case 'T':
+                        return solaris_timod(fd, si.cmd, si.data, si.len,
+				&((struct strioctl __user *)A(arg))->len);
+		default:
+			return solaris_ioctl(fd, si.cmd, si.data);
+		}
+	case 9: /* I_SETSIG */
+		return sys_ioctl(fd, FIOSETOWN, current->pid);
+	case 10: /* I_GETSIG */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		sys_ioctl(fd, FIOGETOWN, (unsigned long)&ret);
+		set_fs(old_fs);
+		if (ret == current->pid) return 0x3ff;
+		else return -EINVAL;
+	case 11: /* I_FIND */
+        {
+                int i;
+		p = getname (A(arg));
+		if (IS_ERR (p))
+			return PTR_ERR(p);
+                ret = 0;
+                for (i = 0; i < sock->modcount; i++) {
+                        unsigned m = sock->module[i];
+                        if (strcmp(module_table[m].name, p) == 0) {
+                                ret = 1;
+                                break;
+                        } 
+                }
+		putname (p);
+		return ret;
+        }
+	case 19: /* I_SWROPT */
+	case 32: /* I_SETCLTIME */
+		return 0;	/* Lie */
+	}
+	return -ENOSYS;
+}
+
+/* Solaris 's' (socket buffer) ioctls.  The watermark calls are
+ * accepted but have no effect; the rest map directly onto Linux. */
+static inline int solaris_s(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	unsigned int minor = cmd & 0xff;
+
+	if (minor == 0 || minor == 2)	/* SIOCSHIWAT, SIOCSLOWAT */
+		return 0;		/* setting watermarks: unsupported */
+	if (minor == 1 || minor == 3) {	/* SIOCGHIWAT, SIOCGLOWAT */
+		/* Claim a zero watermark. */
+		if (put_user (0, (u32 __user *)A(arg)))
+			return -EFAULT;
+		return 0;		/* Lie */
+	}
+	if (minor == 7)			/* SIOCATMARK */
+		return sys_ioctl(fd, SIOCATMARK, arg);
+	if (minor == 8)			/* SIOCSPGRP */
+		return sys_ioctl(fd, SIOCSPGRP, arg);
+	if (minor == 9)			/* SIOCGPGRP */
+		return sys_ioctl(fd, SIOCGPGRP, arg);
+	return -ENOSYS;
+}
+
+/* Solaris 'r' (routing) ioctls: route add/delete, forwarded through
+ * the 32-bit compat layer so the rtentry translation happens there. */
+static inline int solaris_r(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	unsigned int minor = cmd & 0xff;
+
+	if (minor == 10)	/* SIOCADDRT */
+		return compat_sys_ioctl(fd, SIOCADDRT, arg);
+	if (minor == 11)	/* SIOCDELRT */
+		return compat_sys_ioctl(fd, SIOCDELRT, arg);
+	return -ENOSYS;
+}
+
+/* Solaris 'i' (interface/socket) ioctls.  Most map 1:1 onto Linux
+ * SIOC* commands via the 32-bit compat translator; SIOCGETNAME /
+ * SIOCGETPEER and SIOCGIFNUM are synthesized locally. */
+static inline int solaris_i(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	switch (cmd & 0xff) {
+	case 12: /* SIOCSIFADDR */
+		return compat_sys_ioctl(fd, SIOCSIFADDR, arg);
+	case 13: /* SIOCGIFADDR */
+		return compat_sys_ioctl(fd, SIOCGIFADDR, arg);
+	case 14: /* SIOCSIFDSTADDR */
+		return compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
+	case 15: /* SIOCGIFDSTADDR */
+		return compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
+	case 16: /* SIOCSIFFLAGS */
+		return compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
+	case 17: /* SIOCGIFFLAGS */
+		return compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
+	case 18: /* SIOCSIFMEM */
+		return compat_sys_ioctl(fd, SIOCSIFMEM, arg);
+	case 19: /* SIOCGIFMEM */
+		return compat_sys_ioctl(fd, SIOCGIFMEM, arg);
+	case 20: /* SIOCGIFCONF */
+		return compat_sys_ioctl(fd, SIOCGIFCONF, arg);
+	case 21: /* SIOCSIFMTU */
+		return compat_sys_ioctl(fd, SIOCSIFMTU, arg);
+	case 22: /* SIOCGIFMTU */
+		return compat_sys_ioctl(fd, SIOCGIFMTU, arg);
+	case 23: /* SIOCGIFBRDADDR */
+		return compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+	case 24: /* SIOCSIFBRDADDR */
+		return compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
+	case 25: /* SIOCGIFNETMASK */
+		return compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
+	case 26: /* SIOCSIFNETMASK */
+		return compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
+	case 27: /* SIOCGIFMETRIC */
+		return compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
+	case 28: /* SIOCSIFMETRIC */
+		return compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
+	case 30: /* SIOCSARP */
+		return compat_sys_ioctl(fd, SIOCSARP, arg);
+	case 31: /* SIOCGARP */
+		return compat_sys_ioctl(fd, SIOCGARP, arg);
+	case 32: /* SIOCDARP */
+		return compat_sys_ioctl(fd, SIOCDARP, arg);
+	case 52: /* SIOCGETNAME */
+	case 53: /* SIOCGETPEER */
+		{
+			struct sockaddr uaddr;
+			int uaddr_len = sizeof(struct sockaddr), ret;
+			long args[3];
+			mm_segment_t old_fs = get_fs();
+			int (*sys_socketcall)(int, unsigned long *) =
+				(int (*)(int, unsigned long *))SYS(socketcall);
+			
+			/* Run getsockname/getpeername against a kernel
+			 * buffer under KERNEL_DS, then copy it out. */
+			args[0] = fd; args[1] = (long)&uaddr; args[2] = (long)&uaddr_len;
+			set_fs(KERNEL_DS);
+			ret = sys_socketcall(((cmd & 0xff) == 52) ? SYS_GETSOCKNAME : SYS_GETPEERNAME,
+					args);
+			set_fs(old_fs);
+			if (ret >= 0) {
+				if (copy_to_user(A(arg), &uaddr, uaddr_len))
+					return -EFAULT;
+			}
+			return ret;
+		}
+#if 0		
+	case 86: /* SIOCSOCKSYS */
+		return socksys_syscall(fd, arg);
+#endif		
+	case 87: /* SIOCGIFNUM */
+		{
+			struct net_device *d;
+			int i = 0;
+			
+			/* Count registered network interfaces. */
+			read_lock_bh(&dev_base_lock);
+			for (d = dev_base; d; d = d->next) i++;
+			read_unlock_bh(&dev_base_lock);
+
+			if (put_user (i, (int __user *)A(arg)))
+				return -EFAULT;
+			return 0;
+		}
+	}
+	return -ENOSYS;
+}
+
+/* Solaris 'm' (magnetic tape) ioctls: only MTIOCTOP and MTIOCGET have
+ * Linux counterparts.
+ * NOTE(review): both calls pass (unsigned long)&arg -- the address of
+ * the kernel copy of the 32-bit argument word -- rather than A(arg),
+ * the user buffer it points at.  That looks wrong; confirm. */
+static int solaris_m(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	int ret;
+
+	switch (cmd & 0xff) {
+	case 1: /* MTIOCTOP */
+		ret = sys_ioctl(fd, MTIOCTOP, (unsigned long)&arg);
+		break;
+	case 2: /* MTIOCGET */
+		ret = sys_ioctl(fd, MTIOCGET, (unsigned long)&arg);
+		break;
+	case 3: /* MTIOCGETDRIVETYPE */
+	case 4: /* MTIOCPERSISTENT */
+	case 5: /* MTIOCPERSISTENTSTATUS */
+	case 6: /* MTIOCLRERR */
+	case 7: /* MTIOCGUARANTEEDORDER */
+	case 8: /* MTIOCRESERVE */
+	case 9: /* MTIOCRELEASE */
+	case 10: /* MTIOCFORCERESERVE */
+	case 13: /* MTIOCSTATE */
+	case 14: /* MTIOCREADIGNOREILI */
+	case 15: /* MTIOCREADIGNOREEOFS */
+	case 16: /* MTIOCSHORTFMK */
+	default:
+		ret = -ENOSYS; /* linux doesn't support these */
+		break;
+	};
+
+	return ret;
+}
+
+/* Solaris 'O' (openprom) ioctls.  Subcodes 1-12 have identically-named
+ * Linux counterparts and pass straight through; everything above that
+ * (OPROMGETVERSION and friends) is unimplemented here. */
+static int solaris_O(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	static const unsigned int oprom_cmd[] = {
+		0,
+		OPROMGETOPT,		/*  1 */
+		OPROMSETOPT,		/*  2 */
+		OPROMNXTOPT,		/*  3 */
+		OPROMSETOPT2,		/*  4 */
+		OPROMNEXT,		/*  5 */
+		OPROMCHILD,		/*  6 */
+		OPROMGETPROP,		/*  7 */
+		OPROMNXTPROP,		/*  8 */
+		OPROMU2P,		/*  9 */
+		OPROMGETCONS,		/* 10 */
+		OPROMGETFBNAME,		/* 11 */
+		OPROMGETBOOTARGS,	/* 12 */
+	};
+	unsigned int minor = cmd & 0xff;
+
+	if (minor >= 1 && minor <= 12)
+		return sys_ioctl(fd, oprom_cmd[minor], arg);
+	return -EINVAL;
+}
+
+/* }}} */
+
+/* Main Solaris ioctl entry point: pin the file, then dispatch on the
+ * "group" character held in bits 8-15 of cmd.  Unknown commands are
+ * logged and mapped to -EINVAL so broken binaries keep running. */
+asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	struct file *filp;
+	int error = -EBADF;
+
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	lock_kernel();
+	error = -EFAULT;
+	switch ((cmd >> 8) & 0xff) {
+	case 'S': error = solaris_S(filp, fd, cmd, arg); break;
+	case 'T': error = solaris_T(fd, cmd, arg); break;
+	case 'i': error = solaris_i(fd, cmd, arg); break;
+	case 'r': error = solaris_r(fd, cmd, arg); break;
+	case 's': error = solaris_s(fd, cmd, arg); break;
+	case 't': error = solaris_t(fd, cmd, arg); break;
+	case 'f': error = sys_ioctl(fd, cmd, arg); break;
+	case 'm': error = solaris_m(fd, cmd, arg); break;
+	case 'O': error = solaris_O(fd, cmd, arg); break;
+	default:
+		error = -ENOSYS;
+		break;
+	}
+	unlock_kernel();
+	fput(filp);
+out:
+	if (error == -ENOSYS) {
+		unsigned char c = cmd>>8;
+		
+		/* Print the group character if it is printable ASCII. */
+		if (c < ' ' || c > 126) c = '.';
+		printk("solaris_ioctl: Unknown cmd fd(%d) cmd(%08x '%c') arg(%08x)\n",
+		       (int)fd, (unsigned int)cmd, c, (unsigned int)arg);
+		error = -EINVAL;
+	}
+	return error;
+}
diff --git a/arch/sparc64/solaris/ipc.c b/arch/sparc64/solaris/ipc.c
new file mode 100644
index 0000000..8cef5fd
--- /dev/null
+++ b/arch/sparc64/solaris/ipc.c
@@ -0,0 +1,127 @@
+/* $Id: ipc.c,v 1.5 1999/12/09 00:41:00 davem Exp $
+ * ipc.c: Solaris IPC emulation
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp_lock.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/shm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/ipc.h>
+
+#include "conv.h"
+
+/* Solaris struct ipc_perm as seen by 32-bit binaries. */
+struct solaris_ipc_perm {
+	s32	uid;
+	s32	gid;
+	s32	cuid;
+	s32	cgid;
+	u32	mode;
+	u32	seq;
+	int	key;
+	s32	pad[4];
+};
+
+/* Solaris struct shmid_ds; only the fields filled in by
+ * solaris_shmsys() below matter for the emulation. */
+struct solaris_shmid_ds {
+	struct solaris_ipc_perm	shm_perm;
+	int			shm_segsz;
+	u32			shm_amp;
+	unsigned short		shm_lkcnt;
+	char			__padxx[2];
+	s32			shm_lpid;
+	s32			shm_cpid;
+	u32			shm_nattch;
+	u32			shm_cnattch;
+	s32			shm_atime;
+	s32			shm_pad1;
+	s32			shm_dtime;
+	s32			shm_pad2;
+	s32			shm_ctime;
+	s32			shm_pad3;
+	unsigned short		shm_cv;
+	char			shm_pad4[2];
+	u32			shm_sptas;
+	s32			shm_pad5[2];
+};
+
+/* Emulate the Solaris shmsys() multiplexed system call.
+ * cmd: 0 = shmat, 1 = shmctl, 2 = shmdt, 3 = shmget.
+ * Everything funnels through Linux sys_ipc(); KERNEL_DS sections are
+ * used where sys_ipc() expects user pointers to kernel temporaries. */
+asmlinkage long solaris_shmsys(int cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+	int (*sys_ipc)(unsigned,int,int,unsigned long,void __user *,long) = 
+		(int (*)(unsigned,int,int,unsigned long,void __user *,long))SYS(ipc);
+	mm_segment_t old_fs;
+	unsigned long raddr;
+	int ret;
+		
+	switch (cmd) {
+	case 0: /* shmat */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		/* 0x4000 is Solaris SHM_SHARE_MMU; Linux has no
+		 * equivalent, so it is masked out of the flags. */
+		ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, A(arg2), 0);
+		set_fs(old_fs);
+		if (ret >= 0) return (u32)raddr;
+		else return ret;
+	case 1: /* shmctl */
+		switch (arg2) {
+		case 3: /* SHM_LOCK */
+		case 4: /* SHM_UNLOCK */
+			return sys_ipc(SHMCTL, arg1, (arg2 == 3) ? SHM_LOCK : SHM_UNLOCK, 0, NULL, 0);
+		case 10: /* IPC_RMID */
+			return sys_ipc(SHMCTL, arg1, IPC_RMID, 0, NULL, 0);
+		case 11: /* IPC_SET */
+			{
+				struct shmid_ds s;
+				struct solaris_shmid_ds __user *p = A(arg3);
+				
+				/* Only uid/gid/mode are settable. */
+				if (get_user (s.shm_perm.uid, &p->shm_perm.uid) ||
+				    __get_user (s.shm_perm.gid, &p->shm_perm.gid) || 
+				    __get_user (s.shm_perm.mode, &p->shm_perm.mode))
+					return -EFAULT;
+				old_fs = get_fs();
+				set_fs(KERNEL_DS);
+				ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
+				set_fs(old_fs);
+				return ret;
+			}
+		case 12: /* IPC_STAT */
+			{
+				struct shmid_ds s;
+				struct solaris_shmid_ds __user *p = A(arg3);
+				
+				old_fs = get_fs();
+				set_fs(KERNEL_DS);
+				/* Bug fix: this branch previously issued
+				 * IPC_SET, writing the uninitialized stack
+				 * struct into the segment's state instead
+				 * of reading it.  IPC_STAT fills `s`. */
+				ret = sys_ipc(SHMCTL, arg1, IPC_STAT, 0, &s, 0);
+				set_fs(old_fs);
+				if (put_user (s.shm_perm.uid, &(p->shm_perm.uid)) ||
+				    __put_user (s.shm_perm.gid, &(p->shm_perm.gid)) || 
+				    __put_user (s.shm_perm.cuid, &(p->shm_perm.cuid)) ||
+				    __put_user (s.shm_perm.cgid, &(p->shm_perm.cgid)) || 
+				    __put_user (s.shm_perm.mode, &(p->shm_perm.mode)) ||
+				    __put_user (s.shm_perm.seq, &(p->shm_perm.seq)) ||
+				    __put_user (s.shm_perm.key, &(p->shm_perm.key)) ||
+				    __put_user (s.shm_segsz, &(p->shm_segsz)) ||
+				    __put_user (s.shm_lpid, &(p->shm_lpid)) ||
+				    __put_user (s.shm_cpid, &(p->shm_cpid)) ||
+				    __put_user (s.shm_nattch, &(p->shm_nattch)) ||
+				    __put_user (s.shm_atime, &(p->shm_atime)) ||
+				    __put_user (s.shm_dtime, &(p->shm_dtime)) ||
+				    __put_user (s.shm_ctime, &(p->shm_ctime)))
+					return -EFAULT;
+				return ret;
+			}
+		default: return -EINVAL;
+		}
+	case 2: /* shmdt */
+		return sys_ipc(SHMDT, 0, 0, 0, A(arg1), 0);
+	case 3: /* shmget */
+		return sys_ipc(SHMGET, arg1, arg2, arg3, NULL, 0);
+	}
+	return -EINVAL;
+}
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
new file mode 100644
index 0000000..15b4cfe
--- /dev/null
+++ b/arch/sparc64/solaris/misc.c
@@ -0,0 +1,784 @@
+/* $Id: misc.c,v 1.36 2002/02/09 19:49:31 davem Exp $
+ * misc.c: Miscellaneous syscall emulation for Solaris
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h> 
+#include <linux/types.h>
+#include <linux/smp_lock.h>
+#include <linux/utsname.h>
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/timex.h>
+#include <linux/major.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+#include <asm/smp.h>
+
+#include "conv.h"
+
+/* Conversion from Linux to Solaris errnos. 0-34 are identity mapped.
+   Some Linux errnos (EPROCLIM, EDOTDOT, ERREMOTE, EUCLEAN, ENOTNAM, 
+   ENAVAIL, EISNAM, EREMOTEIO, ENOMEDIUM, EMEDIUMTYPE) have no Solaris
+   equivalents. I return EINVAL in that case, which is very wrong. If
+   someone suggest a better value for them, you're welcomed.
+   On the other side, Solaris ECANCELED and ENOTSUP have no Linux equivalents,
+   but that doesn't matter here. --jj */
+/* Linux-errno -> Solaris-errno translation table, indexed by the Linux
+ * errno value.  Entries holding 22 (Solaris EINVAL) mark Linux errnos
+ * with no Solaris equivalent, as explained in the comment above. */
+int solaris_err_table[] = {
+/* 0 */  0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+/* 10 */  10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+/* 20 */  20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+/* 30 */  30, 31, 32, 33, 34, 22, 150, 149, 95, 96,
+/* 40 */  97, 98, 99, 120, 121, 122, 123, 124, 125, 126, 
+/* 50 */ 127, 128, 129, 130, 131, 132, 133, 134, 143, 144,
+/* 60 */ 145, 146, 90, 78, 147, 148, 93, 22, 94, 49,
+/* 70 */ 151, 66, 60, 62, 63, 35, 77, 36, 45, 46, 
+/* 80 */ 64, 22, 67, 68, 69, 70, 71, 74, 22, 82, 
+/* 90 */ 89, 92, 79, 81, 37, 38, 39, 40, 41, 42,
+/* 100 */ 43, 44, 50, 51, 52, 53, 54, 55, 56, 57,
+/* 110 */ 87, 61, 84, 65, 83, 80, 91, 22, 22, 22,
+/* 120 */ 22, 22, 88, 86, 85, 22, 22,
+};
+
+#define SOLARIS_NR_OPEN	256
+
+/* Core of the Solaris mmap() emulation.  Translates Solaris conventions
+ * into Linux ones before calling do_mmap():
+ *  - MAP_NORESERVE is not implemented and is stripped (with a rate-limited
+ *    warning),
+ *  - mapping char device (MEM_MAJOR, minor 5) -- /dev/zero -- becomes an
+ *    anonymous mapping,
+ *  - without the _MAP_NEW flag the old-style return convention is used
+ *    (0 on success instead of the mapped address).
+ * Returns the 32-bit mapping address or a negative errno (as u32). */
+static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 off)
+{
+	struct file *file = NULL;
+	unsigned long retval, ret_type;
+
+	/* Do we need it here? */
+	set_personality(PER_SVR4);
+	if (flags & MAP_NORESERVE) {
+		static int cnt;
+		
+		if (cnt < 5) {
+			printk("%s:  unimplemented Solaris MAP_NORESERVE mmap() flag\n",
+			       current->comm);
+			cnt++;
+		}
+		flags &= ~MAP_NORESERVE;
+	}
+	retval = -EBADF;
+	if(!(flags & MAP_ANONYMOUS)) {
+		/* Solaris caps file descriptors at SOLARIS_NR_OPEN (256). */
+		if(fd >= SOLARIS_NR_OPEN)
+			goto out;
+ 		file = fget(fd);
+		if (!file)
+			goto out;
+		else {
+			struct inode * inode = file->f_dentry->d_inode;
+			/* (MEM_MAJOR, 5) is /dev/zero: turn it into an
+			 * anonymous mapping and drop the file reference. */
+			if(imajor(inode) == MEM_MAJOR &&
+			   iminor(inode) == 5) {
+				flags |= MAP_ANONYMOUS;
+				fput(file);
+				file = NULL;
+			}
+		}
+	}
+
+	retval = -EINVAL;
+	len = PAGE_ALIGN(len);
+	/* Keep fixed mappings inside the 32-bit task's address range. */
+	if(!(flags & MAP_FIXED))
+		addr = 0;
+	else if (len > 0xf0000000UL || addr > 0xf0000000UL - len)
+		goto out_putf;
+	ret_type = flags & _MAP_NEW;
+	flags &= ~_MAP_NEW;
+
+	down_write(&current->mm->mmap_sem);
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	retval = do_mmap(file,
+			 (unsigned long) addr, (unsigned long) len,
+			 (unsigned long) prot, (unsigned long) flags, off);
+	up_write(&current->mm->mmap_sem);
+	/* Old-style (pre-_MAP_NEW) mmap returns 0 on success. */
+	if(!ret_type)
+		retval = ((retval < 0xf0000000) ? 0 : retval);
+	                        
+out_putf:
+	if (file)
+		fput(file);
+out:
+	return (u32) retval;
+}
+
+/* Solaris mmap() with a 32-bit file offset; thin wrapper around
+ * do_solaris_mmap(). */
+asmlinkage u32 solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
+{
+	return do_solaris_mmap(addr, len, prot, flags, fd, (u64) off);
+}
+
+/* Solaris mmap64(): the high word of the 64-bit offset arrives in
+ * register argument 'offhi', the low word is fetched from the caller's
+ * stack frame.  The frame offset (0x5c vs 0x60) depends on the value of
+ * %g1 -- presumably this distinguishes the two Solaris syscall entry
+ * conventions; NOTE(review): verify against the entry code. */
+asmlinkage u32 solaris_mmap64(struct pt_regs *regs, u32 len, u32 prot, u32 flags, u32 fd, u32 offhi)
+{
+	u32 offlo;
+	
+	if (regs->u_regs[UREG_G1]) {
+		if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
+			return -EFAULT;
+	} else {
+		if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
+			return -EFAULT;
+	}
+	return do_solaris_mmap((u32)regs->u_regs[UREG_I0], len, prot, flags, fd, (((u64)offhi)<<32)|offlo);
+}
+
+/* Solaris brk(): forwarded to the SunOS emulation's brk handler
+ * (entry 17 in the SunOS syscall table). */
+asmlinkage int solaris_brk(u32 brk)
+{
+	int (*sunos_brk)(u32) = (int (*)(u32))SUNOS(17);
+	
+	return sunos_brk(brk);
+}
+
+/* Copy a utsname-style string field to user space.
+ * @to/@to_size:   destination user buffer and its size
+ * @from/@from_size: kernel source string and its size
+ * @dotchop:       if set, terminate the string at the first '.'
+ *                 (used to strip the domain part of nodenames)
+ * @countfrom:     if set, copy at most from_size bytes; otherwise fill
+ *                 the whole destination field
+ * The result is always NUL-terminated.  Returns 0 or -EFAULT. */
+static int __set_utsfield(char __user *to, int to_size,
+			  const char *from, int from_size,
+			  int dotchop, int countfrom)
+{
+	int len = countfrom ? (to_size > from_size ?
+			       from_size : to_size) : to_size;
+	int off;
+
+	if (copy_to_user(to, from, len))
+		return -EFAULT;
+
+	/* Place the terminator after the copied data, or in the last
+	 * byte when the field was filled completely. */
+	off = len < to_size? len: len - 1;
+	if (dotchop) {
+		const char *p = strnchr(from, len, '.');
+		if (p) off = p - from;
+	}
+
+	if (__put_user('\0', to + off))
+		return -EFAULT;
+
+	return 0;
+}
+
+#define set_utsfield(to, from, dotchop, countfrom) \
+	__set_utsfield((to), sizeof(to), \
+		       (from), sizeof(from), \
+		       (dotchop), (countfrom))
+
+/* Layout of the old Solaris uname() buffer (9-byte fields). */
+struct sol_uname {
+	char sysname[9];
+	char nodename[9];
+	char release[9];
+	char version[9];
+	char machine[9];
+};
+
+/* Layout of the modern Solaris utsname buffer (257-byte fields). */
+struct sol_utsname {
+	char sysname[257];
+	char nodename[257];
+	char release[257];
+	char version[257];
+	char machine[257];
+};
+
+/* Return the machine-class string for the running CPU model,
+ * falling back to the generic "sparc". */
+static char *machine(void)
+{
+	switch (sparc_cpu_model) {
+	case sun4: return "sun4";
+	case sun4c: return "sun4c";
+	case sun4e: return "sun4e";
+	case sun4m: return "sun4m";
+	case sun4d: return "sun4d";
+	case sun4u: return "sun4u";
+	default: return "sparc";
+	}
+}
+
+/* Return the OpenPROM "name" property of the root node with '/' and ' '
+ * replaced by '_', or "sun4u" when the property is missing or empty.
+ * @buffer is a caller-supplied 256-byte scratch buffer. */
+static char *platform(char *buffer)
+{
+	int len;
+
+	*buffer = 0;
+	/* Request at most 255 bytes so the NUL terminator written below
+	 * cannot overrun the caller's 256-byte buffer (asking for 256
+	 * could make buffer[len] write one past the end). */
+	len = prom_getproperty(prom_root_node, "name", buffer, 255);
+	if(len > 0)
+		buffer[len] = 0;
+	if (*buffer) {
+		char *p;
+
+		for (p = buffer; *p; p++)
+			if (*p == '/' || *p == ' ') *p = '_';
+		return buffer;
+	}
+
+	return "sun4u";
+}
+
+/* Return the board serial number from the PROM "options" node, or a
+ * fixed fake serial when the property is absent.
+ * @buffer is a caller-supplied 256-byte scratch buffer. */
+static char *serial(char *buffer)
+{
+	int node = prom_getchild(prom_root_node);
+	int len;
+
+	node = prom_searchsiblings(node, "options");
+	*buffer = 0;
+	/* Request at most 255 bytes so the NUL terminator written below
+	 * cannot overrun the caller's 256-byte buffer. */
+	len = prom_getproperty(node, "system-board-serial#", buffer, 255);
+	if(len > 0)
+		buffer[len] = 0;
+	if (!*buffer)
+		return "4512348717234";
+	else
+		return buffer;
+}
+
+/* Solaris utssys(): multiplexer for old-style uname (which == 0),
+ * ustat (2) and fusers (3).  Only the uname case is implemented; the
+ * reported sysname/release/version are faked ("SunOS 2.6 Generic"). */
+asmlinkage int solaris_utssys(u32 buf, u32 flags, int which, u32 buf2)
+{
+	struct sol_uname __user *v = A(buf);
+	int err;
+
+	switch (which) {
+	case 0:	/* old uname */
+		/* Let's cheat */
+		err  = set_utsfield(v->sysname, "SunOS", 1, 0);
+		down_read(&uts_sem);
+		err |= set_utsfield(v->nodename, system_utsname.nodename,
+				    1, 1);
+		up_read(&uts_sem);
+		err |= set_utsfield(v->release, "2.6", 0, 0);
+		err |= set_utsfield(v->version, "Generic", 0, 0);
+		err |= set_utsfield(v->machine, machine(), 0, 0);
+		return (err ? -EFAULT : 0);
+	case 2: /* ustat */
+		return -ENOSYS;
+	case 3: /* fusers */
+		return -ENOSYS;
+	default:
+		return -ENOSYS;
+	}
+}
+
+/* Solaris uname(): fill the large utsname structure.  The sysname and
+ * release are faked as "SunOS 5.6" so Solaris binaries feel at home;
+ * only the nodename comes from the real system_utsname. */
+asmlinkage int solaris_utsname(u32 buf)
+{
+	struct sol_utsname __user *v = A(buf);
+	int err;
+
+	/* Why should we not lie a bit? */
+	down_read(&uts_sem);
+	err  = set_utsfield(v->sysname, "SunOS", 0, 0);
+	err |= set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
+	err |= set_utsfield(v->release, "5.6", 0, 0);
+	err |= set_utsfield(v->version, "Generic", 0, 0);
+	err |= set_utsfield(v->machine, machine(), 0, 0);
+	up_read(&uts_sem);
+
+	return (err ? -EFAULT : 0);
+}
+
+#define SI_SYSNAME		1       /* return name of operating system */
+#define SI_HOSTNAME		2       /* return name of node */
+#define SI_RELEASE		3       /* return release of operating system */
+#define SI_VERSION		4       /* return version field of utsname */
+#define SI_MACHINE		5       /* return kind of machine */
+#define SI_ARCHITECTURE		6       /* return instruction set arch */
+#define SI_HW_SERIAL		7       /* return hardware serial number */
+#define SI_HW_PROVIDER		8       /* return hardware manufacturer */
+#define SI_SRPC_DOMAIN		9       /* return secure RPC domain */
+#define SI_PLATFORM		513     /* return platform identifier */
+
+/* Solaris sysinfo(2): copy the requested system string into the user
+ * buffer of size @count, truncating (but still NUL-terminating) when
+ * the buffer is too small.  Returns the full length including the NUL,
+ * so callers can detect truncation. */
+asmlinkage int solaris_sysinfo(int cmd, u32 buf, s32 count)
+{
+	char *p, *q, *r;
+	char buffer[256];
+	int len;
+	
+	/* Reject non-positive buffer sizes up front: "count - 1" below
+	 * would otherwise underflow and produce a huge copy_to_user()
+	 * length / a write before the start of the buffer. */
+	if (count <= 0)
+		return -EINVAL;
+
+	/* Again, we cheat :)) */
+	switch (cmd) {
+	case SI_SYSNAME: r = "SunOS"; break;
+	case SI_HOSTNAME:
+		/* Copy the nodename up to the first '.' (no domain part). */
+		r = buffer + 256;
+		down_read(&uts_sem);
+		for (p = system_utsname.nodename, q = buffer; 
+		     q < r && *p && *p != '.'; *q++ = *p++);
+		up_read(&uts_sem);
+		*q = 0;
+		r = buffer;
+		break;
+	case SI_RELEASE: r = "5.6"; break;
+	case SI_MACHINE: r = machine(); break;
+	case SI_ARCHITECTURE: r = "sparc"; break;
+	case SI_HW_PROVIDER: r = "Sun_Microsystems"; break;
+	case SI_HW_SERIAL: r = serial(buffer); break;
+	case SI_PLATFORM: r = platform(buffer); break;
+	case SI_SRPC_DOMAIN: r = ""; break;
+	case SI_VERSION: r = "Generic"; break;
+	default: return -EINVAL;
+	}
+	len = strlen(r) + 1;
+	if (count < len) {
+		/* Buffer too small: copy what fits and terminate it. */
+		if (copy_to_user(A(buf), r, count - 1) ||
+		    __put_user(0, (char __user *)A(buf) + count - 1))
+			return -EFAULT;
+	} else {
+		if (copy_to_user(A(buf), r, len))
+			return -EFAULT;
+	}
+	return len;
+}
+
+#define	SOLARIS_CONFIG_NGROUPS			2
+#define	SOLARIS_CONFIG_CHILD_MAX		3
+#define	SOLARIS_CONFIG_OPEN_FILES		4
+#define	SOLARIS_CONFIG_POSIX_VER		5
+#define	SOLARIS_CONFIG_PAGESIZE			6
+#define	SOLARIS_CONFIG_CLK_TCK			7
+#define	SOLARIS_CONFIG_XOPEN_VER		8
+#define	SOLARIS_CONFIG_PROF_TCK			10
+#define	SOLARIS_CONFIG_NPROC_CONF		11
+#define	SOLARIS_CONFIG_NPROC_ONLN		12
+#define	SOLARIS_CONFIG_AIO_LISTIO_MAX		13
+#define	SOLARIS_CONFIG_AIO_MAX			14
+#define	SOLARIS_CONFIG_AIO_PRIO_DELTA_MAX	15
+#define	SOLARIS_CONFIG_DELAYTIMER_MAX		16
+#define	SOLARIS_CONFIG_MQ_OPEN_MAX		17
+#define	SOLARIS_CONFIG_MQ_PRIO_MAX		18
+#define	SOLARIS_CONFIG_RTSIG_MAX		19
+#define	SOLARIS_CONFIG_SEM_NSEMS_MAX		20
+#define	SOLARIS_CONFIG_SEM_VALUE_MAX		21
+#define	SOLARIS_CONFIG_SIGQUEUE_MAX		22
+#define	SOLARIS_CONFIG_SIGRT_MIN		23
+#define	SOLARIS_CONFIG_SIGRT_MAX		24
+#define	SOLARIS_CONFIG_TIMER_MAX		25
+#define	SOLARIS_CONFIG_PHYS_PAGES		26
+#define	SOLARIS_CONFIG_AVPHYS_PAGES		27
+
+/* Solaris sysconf(): return the requested configuration value or
+ * -EINVAL for the many limits we do not emulate.
+ * NOTE(review): PHYS/AVPHYS_PAGES shift the si_meminfo() values right
+ * by PAGE_SHIFT -- confirm si_meminfo() units here; if it already
+ * reports pages this double-converts. */
+asmlinkage int solaris_sysconf(int id)
+{
+	switch (id) {
+	case SOLARIS_CONFIG_NGROUPS:	return NGROUPS_MAX;
+	case SOLARIS_CONFIG_CHILD_MAX:	return CHILD_MAX;
+	case SOLARIS_CONFIG_OPEN_FILES:	return OPEN_MAX;
+	case SOLARIS_CONFIG_POSIX_VER:	return 199309;
+	case SOLARIS_CONFIG_PAGESIZE:	return PAGE_SIZE;
+	case SOLARIS_CONFIG_XOPEN_VER:	return 3;
+	case SOLARIS_CONFIG_CLK_TCK:
+	case SOLARIS_CONFIG_PROF_TCK:
+		return sparc64_get_clock_tick(smp_processor_id());
+#ifdef CONFIG_SMP	
+	case SOLARIS_CONFIG_NPROC_CONF:	return NR_CPUS;
+	case SOLARIS_CONFIG_NPROC_ONLN:	return num_online_cpus();
+#else
+	case SOLARIS_CONFIG_NPROC_CONF:	return 1;
+	case SOLARIS_CONFIG_NPROC_ONLN:	return 1;
+#endif
+	case SOLARIS_CONFIG_SIGRT_MIN:		return 37;
+	case SOLARIS_CONFIG_SIGRT_MAX:		return 44;
+	case SOLARIS_CONFIG_PHYS_PAGES:
+	case SOLARIS_CONFIG_AVPHYS_PAGES:
+		{
+			struct sysinfo s;
+			
+			si_meminfo(&s);
+			if (id == SOLARIS_CONFIG_PHYS_PAGES)
+				return s.totalram >>= PAGE_SHIFT;
+			else
+				return s.freeram >>= PAGE_SHIFT;
+		}
+	/* XXX support these as well -jj */
+	case SOLARIS_CONFIG_AIO_LISTIO_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_AIO_MAX:		return -EINVAL;
+	case SOLARIS_CONFIG_AIO_PRIO_DELTA_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_DELAYTIMER_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_MQ_OPEN_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_MQ_PRIO_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_RTSIG_MAX:		return -EINVAL;
+	case SOLARIS_CONFIG_SEM_NSEMS_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_SEM_VALUE_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_SIGQUEUE_MAX:	return -EINVAL;
+	case SOLARIS_CONFIG_TIMER_MAX:		return -EINVAL;
+	default: return -EINVAL;
+	}
+}
+
+/* Solaris pgrpsys(): multiplexer for the process-group/session calls,
+ * dispatched on @cmd and forwarded to the native syscalls via the
+ * SYS() syscall-table lookup. */
+asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid)
+{
+	int ret;
+	
+	switch (cmd) {
+	case 0: /* getpgrp */
+		return process_group(current);
+	case 1: /* setpgrp */
+		{
+			int (*sys_setpgid)(pid_t,pid_t) =
+				(int (*)(pid_t,pid_t))SYS(setpgid);
+				
+			/* can anyone explain me the difference between
+			   Solaris setpgrp and setsid? */
+			ret = sys_setpgid(0, 0);
+			if (ret) return ret;
+			/* Detach from the controlling terminal, as a
+			 * Solaris setpgrp would. */
+			current->signal->tty = NULL;
+			return process_group(current);
+		}
+	case 2: /* getsid */
+		{
+			int (*sys_getsid)(pid_t) = (int (*)(pid_t))SYS(getsid);
+			return sys_getsid(pid);
+		}
+	case 3: /* setsid */
+		{
+			int (*sys_setsid)(void) = (int (*)(void))SYS(setsid);
+			return sys_setsid();
+		}
+	case 4: /* getpgid */
+		{
+			int (*sys_getpgid)(pid_t) = (int (*)(pid_t))SYS(getpgid);
+			return sys_getpgid(pid);
+		}
+	case 5: /* setpgid */
+		{
+			int (*sys_setpgid)(pid_t,pid_t) = 
+				(int (*)(pid_t,pid_t))SYS(setpgid);
+			return sys_setpgid(pid,pgid);
+		}
+	}
+	return -EINVAL;
+}
+
+/* Solaris gettimeofday(): same layout as the native call, so just
+ * forward the user pointer with a NULL timezone. */
+asmlinkage int solaris_gettimeofday(u32 tim)
+{
+	int (*sys_gettimeofday)(struct timeval *, struct timezone *) =
+		(int (*)(struct timeval *, struct timezone *))SYS(gettimeofday);
+		
+	return sys_gettimeofday((struct timeval *)(u64)tim, NULL);
+}
+
+#define RLIM_SOL_INFINITY32	0x7fffffff
+#define RLIM_SOL_SAVED_MAX32	0x7ffffffe
+#define RLIM_SOL_SAVED_CUR32	0x7ffffffd
+#define RLIM_SOL_INFINITY	((u64)-3)
+#define RLIM_SOL_SAVED_MAX	((u64)-2)
+#define RLIM_SOL_SAVED_CUR	((u64)-1)
+#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
+#define RLIMIT_SOL_NOFILE	5
+#define RLIMIT_SOL_VMEM		6
+
+struct rlimit32 {
+	u32	rlim_cur;
+	u32	rlim_max;
+};
+
+/* Solaris 32-bit getrlimit(): remap Solaris resource numbers to Linux
+ * ones, call the native syscall on a kernel struct (hence the
+ * set_fs(KERNEL_DS) bracket), then translate RLIM_INFINITY and
+ * too-large values into the Solaris 32-bit sentinel encodings. */
+asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+{
+	struct rlimit r;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	int (*sys_getrlimit)(unsigned int, struct rlimit *) =
+		(int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+
+	if (resource > RLIMIT_SOL_VMEM)
+		return -EINVAL;	
+	switch (resource) {
+	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
+	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
+	default: break;
+	}
+	set_fs (KERNEL_DS);
+	ret = sys_getrlimit(resource, &r);
+	set_fs (old_fs);
+	if (!ret) {
+		if (r.rlim_cur == RLIM_INFINITY)
+			r.rlim_cur = RLIM_SOL_INFINITY32;
+		else if ((u64)r.rlim_cur > RLIM_SOL_INFINITY32)
+			r.rlim_cur = RLIM_SOL_SAVED_CUR32;
+		if (r.rlim_max == RLIM_INFINITY)
+			r.rlim_max = RLIM_SOL_INFINITY32;
+		else if ((u64)r.rlim_max > RLIM_SOL_INFINITY32)
+			r.rlim_max = RLIM_SOL_SAVED_MAX32;
+		ret = put_user (r.rlim_cur, &rlim->rlim_cur);
+		ret |= __put_user (r.rlim_max, &rlim->rlim_max);
+	}
+	return ret;
+}
+
+/* Solaris 32-bit setrlimit(): the Solaris sentinel values
+ * (INFINITY32 / SAVED_CUR32 / SAVED_MAX32) are resolved against the
+ * current limits obtained with getrlimit before calling the native
+ * setrlimit on the translated kernel struct. */
+asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 __user *rlim)
+{
+	struct rlimit r, rold;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+	int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
+		(int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+
+	if (resource > RLIMIT_SOL_VMEM)
+		return -EINVAL;	
+	switch (resource) {
+	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
+	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
+	default: break;
+	}
+	if (get_user (r.rlim_cur, &rlim->rlim_cur) ||
+	    __get_user (r.rlim_max, &rlim->rlim_max))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_getrlimit(resource, &rold);
+	if (!ret) {
+		if (r.rlim_cur == RLIM_SOL_INFINITY32)
+			r.rlim_cur = RLIM_INFINITY;
+		else if (r.rlim_cur == RLIM_SOL_SAVED_CUR32)
+			r.rlim_cur = rold.rlim_cur;
+		else if (r.rlim_cur == RLIM_SOL_SAVED_MAX32)
+			r.rlim_cur = rold.rlim_max;
+		if (r.rlim_max == RLIM_SOL_INFINITY32)
+			r.rlim_max = RLIM_INFINITY;
+		else if (r.rlim_max == RLIM_SOL_SAVED_CUR32)
+			r.rlim_max = rold.rlim_cur;
+		else if (r.rlim_max == RLIM_SOL_SAVED_MAX32)
+			r.rlim_max = rold.rlim_max;
+		ret = sys_setrlimit(resource, &r);
+	}
+	set_fs (old_fs);
+	return ret;
+}
+
+/* Solaris getrlimit64(): like solaris_getrlimit() but with 64-bit
+ * fields, so only RLIM_INFINITY needs translating to the Solaris
+ * 64-bit sentinel. */
+asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit __user *rlim)
+{
+	struct rlimit r;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+
+	if (resource > RLIMIT_SOL_VMEM)
+		return -EINVAL;	
+	switch (resource) {
+	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
+	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
+	default: break;
+	}
+	set_fs (KERNEL_DS);
+	ret = sys_getrlimit(resource, &r);
+	set_fs (old_fs);
+	if (!ret) {
+		if (r.rlim_cur == RLIM_INFINITY)
+			r.rlim_cur = RLIM_SOL_INFINITY;
+		if (r.rlim_max == RLIM_INFINITY)
+			r.rlim_max = RLIM_SOL_INFINITY;
+		ret = put_user (r.rlim_cur, &rlim->rlim_cur);
+		ret |= __put_user (r.rlim_max, &rlim->rlim_max);
+	}
+	return ret;
+}
+
+/* Solaris setrlimit64(): 64-bit counterpart of solaris_setrlimit();
+ * resolves the 64-bit sentinels (INFINITY / SAVED_CUR / SAVED_MAX)
+ * against the current limits before applying them. */
+asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit __user *rlim)
+{
+	struct rlimit r, rold;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+		(int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+	int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
+		(int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
+
+	if (resource > RLIMIT_SOL_VMEM)
+		return -EINVAL;	
+	switch (resource) {
+	case RLIMIT_SOL_NOFILE: resource = RLIMIT_NOFILE; break;
+	case RLIMIT_SOL_VMEM: resource = RLIMIT_AS; break;
+	default: break;
+	}
+	if (get_user (r.rlim_cur, &rlim->rlim_cur) ||
+	    __get_user (r.rlim_max, &rlim->rlim_max))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_getrlimit(resource, &rold);
+	if (!ret) {
+		if (r.rlim_cur == RLIM_SOL_INFINITY)
+			r.rlim_cur = RLIM_INFINITY;
+		else if (r.rlim_cur == RLIM_SOL_SAVED_CUR)
+			r.rlim_cur = rold.rlim_cur;
+		else if (r.rlim_cur == RLIM_SOL_SAVED_MAX)
+			r.rlim_cur = rold.rlim_max;
+		if (r.rlim_max == RLIM_SOL_INFINITY)
+			r.rlim_max = RLIM_INFINITY;
+		else if (r.rlim_max == RLIM_SOL_SAVED_CUR)
+			r.rlim_max = rold.rlim_cur;
+		else if (r.rlim_max == RLIM_SOL_SAVED_MAX)
+			r.rlim_max = rold.rlim_max;
+		ret = sys_setrlimit(resource, &r);
+	}
+	set_fs (old_fs);
+	return ret;
+}
+
+/* 32-bit layout of the Solaris ntp_gettime() result. */
+struct sol_ntptimeval {
+	struct compat_timeval time;
+	s32 maxerror;
+	s32 esterror;
+};
+
+/* 32-bit layout of the Solaris ntp_adjtime() timex structure. */
+struct sol_timex {
+	u32 modes;
+	s32 offset;
+	s32 freq;
+	s32 maxerror;
+	s32 esterror;
+	s32 status;
+	s32 constant;
+	s32 precision;
+	s32 tolerance;
+	s32 ppsfreq;
+	s32 jitter;
+	s32 shift;
+	s32 stabil;
+	s32 jitcnt;
+	s32 calcnt;
+	s32 errcnt;
+	s32 stbcnt;
+};
+
+/* Solaris ntp_gettime(): read the clock state via adjtimex() with
+ * modes == 0 (pure query) on a kernel struct, then copy the relevant
+ * fields out in the 32-bit Solaris layout. */
+asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval __user *ntp)
+{
+	int (*sys_adjtimex)(struct timex __user *) =
+		(int (*)(struct timex __user *))SYS(adjtimex);
+	struct timex t;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	
+	set_fs(KERNEL_DS);
+	t.modes = 0;
+	ret = sys_adjtimex(&t);
+	set_fs(old_fs);
+	if (ret < 0)
+		return ret;
+	ret = put_user (t.time.tv_sec, &ntp->time.tv_sec);
+	ret |= __put_user (t.time.tv_usec, &ntp->time.tv_usec);
+	ret |= __put_user (t.maxerror, &ntp->maxerror);
+	ret |= __put_user (t.esterror, &ntp->esterror);
+	return ret;	                        
+}
+
+/* Solaris ntp_adjtime(): copy the adjustable fields in from the 32-bit
+ * Solaris timex, run the native adjtimex() on a kernel struct, then
+ * copy the full result back out.  Returns the clock state from
+ * adjtimex() or a negative errno. */
+asmlinkage int solaris_ntp_adjtime(struct sol_timex __user *txp)
+{
+	int (*sys_adjtimex)(struct timex __user *) =
+		(int (*)(struct timex __user *))SYS(adjtimex);
+	struct timex t;
+	int ret, err;
+	mm_segment_t old_fs = get_fs();
+
+	ret = get_user (t.modes, &txp->modes);
+	ret |= __get_user (t.offset, &txp->offset);
+	ret |= __get_user (t.freq, &txp->freq);
+	ret |= __get_user (t.maxerror, &txp->maxerror);
+	ret |= __get_user (t.esterror, &txp->esterror);
+	ret |= __get_user (t.status, &txp->status);
+	ret |= __get_user (t.constant, &txp->constant);
+	/* Bail out on a faulting user pointer instead of passing a
+	 * partially uninitialized timex to adjtimex(); the original
+	 * code overwrote 'ret' below without ever checking it. */
+	if (ret)
+		return -EFAULT;
+	set_fs(KERNEL_DS);
+	ret = sys_adjtimex(&t);
+	set_fs(old_fs);
+	if (ret < 0)
+		return ret;
+	err = put_user (t.offset, &txp->offset);
+	err |= __put_user (t.freq, &txp->freq);
+	err |= __put_user (t.maxerror, &txp->maxerror);
+	err |= __put_user (t.esterror, &txp->esterror);
+	err |= __put_user (t.status, &txp->status);
+	err |= __put_user (t.constant, &txp->constant);
+	err |= __put_user (t.precision, &txp->precision);
+	err |= __put_user (t.tolerance, &txp->tolerance);
+	err |= __put_user (t.ppsfreq, &txp->ppsfreq);
+	err |= __put_user (t.jitter, &txp->jitter);
+	err |= __put_user (t.shift, &txp->shift);
+	err |= __put_user (t.stabil, &txp->stabil);
+	err |= __put_user (t.jitcnt, &txp->jitcnt);
+	err |= __put_user (t.calcnt, &txp->calcnt);
+	err |= __put_user (t.errcnt, &txp->errcnt);
+	err |= __put_user (t.stbcnt, &txp->stbcnt);
+	if (err)
+		return -EFAULT;
+	return ret;
+}
+
+/* Catch-all for unimplemented Solaris syscalls: log the syscall number
+ * (%g1) and first four arguments, return -ENOSYS. */
+asmlinkage int do_sol_unimplemented(struct pt_regs *regs)
+{
+	printk ("Unimplemented Solaris syscall %d %08x %08x %08x %08x\n", 
+			(int)regs->u_regs[UREG_G1], 
+			(int)regs->u_regs[UREG_I0],
+			(int)regs->u_regs[UREG_I1],
+			(int)regs->u_regs[UREG_I2],
+			(int)regs->u_regs[UREG_I3]);
+	return -ENOSYS;
+}
+
+/* Switch the calling task to the SVR4 personality (called when a
+ * Solaris binary registers itself with the emulation). */
+asmlinkage void solaris_register(void)
+{
+	set_personality(PER_SVR4);
+}
+
+extern long solaris_to_linux_signals[], linux_to_solaris_signals[];
+
+/* Execution domain covering the PER_SVR4 personality; provides the
+ * Linux<->Solaris signal number translation tables. */
+struct exec_domain solaris_exec_domain = {
+	.name =		"Solaris",
+	.handler =	NULL,
+	.pers_low =	1,		/* PER_SVR4 personality */
+	.pers_high =	1,
+	.signal_map =	solaris_to_linux_signals,
+	.signal_invmap =linux_to_solaris_signals,
+	.module =	THIS_MODULE,
+	.next =		NULL
+};
+
+extern int init_socksys(void);
+
+#ifdef MODULE
+
+MODULE_AUTHOR("Jakub Jelinek (jj@ultra.linux.cz), Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)");
+MODULE_DESCRIPTION("Solaris binary emulation module");
+MODULE_LICENSE("GPL");
+
+#ifdef __sparc_v9__
+extern u32 tl0_solaris[8];
+#define update_ttable(x) 										\
+	tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000;			\
+	__asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3]))
+#else
+#endif	
+
+extern u32 solaris_sparc_syscall[];
+extern u32 solaris_syscall[];
+extern void cleanup_socksys(void);
+
+extern u32 entry64_personality_patch;
+
+/* Modular init: register the exec domain and socket emulation, then
+ * patch the trap table so Solaris syscall traps enter our handler and
+ * patch the personality offset into the 64-bit entry stub. */
+int init_module(void)
+{
+	int ret;
+
+	SOLDD(("Solaris module at %p\n", solaris_sparc_syscall));
+	register_exec_domain(&solaris_exec_domain);
+	if ((ret = init_socksys())) {
+		unregister_exec_domain(&solaris_exec_domain);
+		return ret;
+	}
+	update_ttable(solaris_sparc_syscall);
+	entry64_personality_patch |=
+		(offsetof(struct task_struct, personality) +
+		 (sizeof(unsigned long) - 1));
+	__asm__ __volatile__("membar #StoreStore; flush %0"
+			     : : "r" (&entry64_personality_patch));
+	return 0;
+}
+
+/* Modular exit: restore the stock trap-table entry and tear down the
+ * socket emulation and exec domain. */
+void cleanup_module(void)
+{
+	update_ttable(solaris_syscall);
+	cleanup_socksys();
+	unregister_exec_domain(&solaris_exec_domain);
+}
+
+#else
+/* Built-in init path.  Mirrors the modular init_module(): if the
+ * socket-subsystem emulation fails to initialize, back out the exec
+ * domain registration and propagate the error instead of silently
+ * ignoring it as the original code did. */
+int init_solaris_emul(void)
+{
+	int ret;
+
+	register_exec_domain(&solaris_exec_domain);
+	ret = init_socksys();
+	if (ret) {
+		unregister_exec_domain(&solaris_exec_domain);
+		return ret;
+	}
+	return 0;
+}
+#endif
+
diff --git a/arch/sparc64/solaris/signal.c b/arch/sparc64/solaris/signal.c
new file mode 100644
index 0000000..7fa2634
--- /dev/null
+++ b/arch/sparc64/solaris/signal.c
@@ -0,0 +1,430 @@
+/* $Id: signal.c,v 1.7 2000/09/05 21:44:54 davem Exp $
+ * signal.c: Signal emulation for Solaris
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/types.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+
+#include <asm/uaccess.h>
+#include <asm/svr4.h>
+#include <asm/string.h>
+
+#include "conv.h"
+#include "signal.h"
+
+#define _S(nr) (1L<<((nr)-1))
+
+#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
+
+/* Linux (sparc) signal number -> Solaris signal number, indexed by the
+ * Linux number; -1 marks signals with no Solaris equivalent.
+ * NOTE(review): SOLARIS_SIGUSR1 appears twice in a row near the end --
+ * verify the entry for the signal preceding SIGUSR1 on sparc. */
+long linux_to_solaris_signals[] = {
+        0,
+	SOLARIS_SIGHUP,		SOLARIS_SIGINT,	
+	SOLARIS_SIGQUIT,	SOLARIS_SIGILL,
+	SOLARIS_SIGTRAP,	SOLARIS_SIGIOT,
+	SOLARIS_SIGEMT,		SOLARIS_SIGFPE,
+	SOLARIS_SIGKILL,	SOLARIS_SIGBUS,
+	SOLARIS_SIGSEGV,	SOLARIS_SIGSYS,
+	SOLARIS_SIGPIPE,	SOLARIS_SIGALRM,
+	SOLARIS_SIGTERM,	SOLARIS_SIGURG,
+	SOLARIS_SIGSTOP,	SOLARIS_SIGTSTP,
+	SOLARIS_SIGCONT,	SOLARIS_SIGCLD,
+	SOLARIS_SIGTTIN,	SOLARIS_SIGTTOU,
+	SOLARIS_SIGPOLL,	SOLARIS_SIGXCPU,
+	SOLARIS_SIGXFSZ,	SOLARIS_SIGVTALRM,
+	SOLARIS_SIGPROF,	SOLARIS_SIGWINCH,
+	SOLARIS_SIGUSR1,	SOLARIS_SIGUSR1,
+	SOLARIS_SIGUSR2,	-1,
+};
+
+/* Solaris signal number -> Linux (sparc) signal number, indexed by the
+ * Solaris number (1..SOLARIS_NSIGNALS); -1 marks Solaris-only signals
+ * (SIGWAITING, SIGLWP, the realtime range, ...). */
+long solaris_to_linux_signals[] = {
+        0,
+        SIGHUP,		SIGINT,		SIGQUIT,	SIGILL,
+        SIGTRAP,	SIGIOT,		SIGEMT,		SIGFPE,
+        SIGKILL,	SIGBUS,		SIGSEGV,	SIGSYS,
+        SIGPIPE,	SIGALRM,	SIGTERM,	SIGUSR1,
+        SIGUSR2,	SIGCHLD,	-1,		SIGWINCH,
+        SIGURG,		SIGPOLL,	SIGSTOP,	SIGTSTP,
+        SIGCONT,	SIGTTIN,	SIGTTOU,	SIGVTALRM,
+        SIGPROF,	SIGXCPU,	SIGXFSZ,        -1,
+	-1,		-1,		-1,		-1,
+	-1,		-1,		-1,		-1,
+	-1,		-1,		-1,		-1,
+};
+
+/* Translate a Solaris signal number to the Linux one, or -EINVAL when
+ * out of range.  May return -1 for valid but unmappable signals. */
+static inline long mapsig(long sig)
+{
+	if ((unsigned long)sig > SOLARIS_NSIGNALS)
+		return -EINVAL;
+	return solaris_to_linux_signals[sig];
+}
+
+/* Solaris kill(): translate the signal number, then forward to the
+ * native sys_kill via the syscall table. */
+asmlinkage int solaris_kill(int pid, int sig)
+{
+	int (*sys_kill)(int,int) = 
+		(int (*)(int,int))SYS(kill);
+	int s = mapsig(sig);
+	
+	if (s < 0) return s;
+	return sys_kill(pid, s);
+}
+
+/* Install a handler for @sig via the native sigaction syscall on a
+ * kernel struct (hence the set_fs bracket).  With @one_shot set, the
+ * SysV signal() semantics are used (SA_ONESHOT | SA_NOMASK).
+ * Returns the previous handler (as a 32-bit value) or a negative
+ * errno. */
+static long sig_handler(int sig, u32 arg, int one_shot)
+{
+	struct sigaction sa, old;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) = 
+		(int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+	
+	sigemptyset(&sa.sa_mask);
+	sa.sa_restorer = NULL;
+	sa.sa_handler = (__sighandler_t)A(arg);
+	sa.sa_flags = 0;
+	if (one_shot) sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+	set_fs (KERNEL_DS);
+	ret = sys_sigaction(sig, (void __user *)&sa, (void __user *)&old);
+	set_fs (old_fs);
+	if (ret < 0) return ret;
+	return (u32)(unsigned long)old.sa_handler;
+}
+
+/* Solaris signal(): one-shot (SysV) handler installation. */
+static inline long solaris_signal(int sig, u32 arg)
+{
+	return sig_handler (sig, arg, 1);
+}
+
+/* Solaris sigset(): arg == 2 means SIG_HOLD (just block the signal);
+ * otherwise unblock it and install the handler with reliable (non
+ * one-shot) semantics. */
+static long solaris_sigset(int sig, u32 arg)
+{
+	if (arg != 2) /* HOLD */ {
+		spin_lock_irq(&current->sighand->siglock);
+		sigdelsetmask(&current->blocked, _S(sig));
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+		return sig_handler (sig, arg, 0);
+	} else {
+		/* SIGKILL/SIGSTOP must never be blocked. */
+		spin_lock_irq(&current->sighand->siglock);
+		sigaddsetmask(&current->blocked, (_S(sig) & ~_BLOCKABLE));
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+		return 0;
+	}
+}
+
+/* Solaris sighold(): block the signal (sigset with the HOLD marker). */
+static inline long solaris_sighold(int sig)
+{
+	return solaris_sigset(sig, 2);
+}
+
+/* Solaris sigrelse(): unblock the signal. */
+static inline long solaris_sigrelse(int sig)
+{
+	spin_lock_irq(&current->sighand->siglock);
+	sigdelsetmask(&current->blocked, _S(sig));
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return 0;
+}
+
+/* Solaris sigignore(): set the signal's disposition to SIG_IGN. */
+static inline long solaris_sigignore(int sig)
+{
+	return sig_handler(sig, (u32)(unsigned long)SIG_IGN, 0);
+}
+
+/* Solaris sigpause(): not yet implemented. */
+static inline long solaris_sigpause(int sig)
+{
+	printk ("Need to support solaris sigpause\n");
+	return -ENOSYS;
+}
+
+/* Solaris signal-function multiplexer: the low byte of @sig is the
+ * signal number, the high bits select the operation (signal, sigset,
+ * sighold, sigrelse, sigignore, sigpause). */
+asmlinkage long solaris_sigfunc(int sig, u32 arg)
+{
+	int func = sig & ~0xff;
+	
+	sig = mapsig(sig & 0xff); 
+	if (sig < 0) return sig; 
+	switch (func) {
+	case 0: return solaris_signal(sig, arg); 
+	case 0x100: return solaris_sigset(sig, arg); 
+	case 0x200: return solaris_sighold(sig);
+	case 0x400: return solaris_sigrelse(sig); 
+	case 0x800: return solaris_sigignore(sig); 
+	case 0x1000: return solaris_sigpause(sig);
+	}
+	return -EINVAL;
+}
+
+/* Solaris sigset_t: four 32-bit words (only the first two are used
+ * by the conversion helpers below). */
+typedef struct {
+	u32 __sigbits[4];
+} sol_sigset_t;
+
+/* Convert a Solaris sigset (first two 32-bit words of @p) into a Linux
+ * sigset_t in @q.  Fails with -EINVAL if any set signal has no Linux
+ * equivalent. */
+static inline int mapin(u32 *p, sigset_t *q)
+{
+	int i;
+	u32 x;
+	int sig;
+	
+	sigemptyset(q);
+	x = p[0];
+	for (i = 1; i <= SOLARIS_NSIGNALS; i++) {
+		if (x & 1) {
+			sig = solaris_to_linux_signals[i];
+			if (sig == -1)
+				return -EINVAL;
+			sigaddsetmask(q, (1L << (sig - 1)));
+		}
+		x >>= 1;
+		/* Move on to the second word after signal 32. */
+		if (i == 32)
+			x = p[1];
+	}
+	return 0;
+}
+
+/* Convert a Linux sigset_t in @q into the first two 32-bit words of a
+ * Solaris sigset in @p.  Fails with -EINVAL if a member signal has no
+ * Solaris equivalent. */
+static inline int mapout(sigset_t *q, u32 *p)
+{
+	int i;
+	int sig;
+	
+	p[0] = 0;
+	p[1] = 0;
+	for (i = 1; i <= 32; i++) {
+		/* sigismember() takes a signal number, not a mask;
+		 * passing sigmask(i) here (as the original code did)
+		 * tests the wrong bit of the set. */
+		if (sigismember(q, i)) {
+			sig = linux_to_solaris_signals[i];
+			if (sig == -1)
+				return -EINVAL;
+			if (sig > 32)
+				p[1] |= 1L << (sig - 33);
+			else
+				p[0] |= 1L << (sig - 1);
+		}
+	}
+	return 0;
+}
+
+/* Solaris sigprocmask(): convert the Solaris sigsets to/from Linux
+ * ones and forward to the native syscall on kernel structs.  Solaris
+ * 'how' value 3 corresponds to Linux SIG_SETMASK. */
+asmlinkage int solaris_sigprocmask(int how, u32 in, u32 out)
+{
+	sigset_t in_s, *ins, out_s, *outs;
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	int (*sys_sigprocmask)(int,sigset_t __user *,sigset_t __user *) = 
+		(int (*)(int,sigset_t __user *,sigset_t __user *))SYS(sigprocmask);
+	
+	ins = NULL; outs = NULL;
+	if (in) {
+		u32 tmp[2];
+		
+		if (copy_from_user (tmp, (void __user *)A(in), 2*sizeof(u32)))
+			return -EFAULT;
+		ins = &in_s;
+		if (mapin (tmp, ins)) return -EINVAL;
+	}
+	if (out) outs = &out_s;
+	set_fs (KERNEL_DS);
+	ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how,
+				(void __user *)ins, (void __user *)outs);
+	set_fs (old_fs);
+	if (ret) return ret;
+	if (out) {
+		u32 tmp[4];
+		
+		/* A Solaris sigset has four words; we only use two. */
+		tmp[2] = 0; tmp[3] = 0;
+		if (mapout (outs, tmp)) return -EINVAL;
+		if (copy_to_user((void __user *)A(out), tmp, 4*sizeof(u32)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/* Helper for the Solaris sigsuspend() entry stub: fetch and convert
+ * the Solaris sigset and return its first Linux mask word (the stub
+ * performs the actual suspend). */
+asmlinkage long do_sol_sigsuspend(u32 mask)
+{
+	sigset_t s;
+	u32 tmp[2];
+		
+	if (copy_from_user (tmp, (sol_sigset_t __user *)A(mask), 2*sizeof(u32)))
+		return -EFAULT;
+	if (mapin (tmp, &s)) return -EINVAL;
+	return (long)s.sig[0];
+}
+
+/* 32-bit layout of the Solaris struct sigaction. */
+struct sol_sigaction {
+	int	sa_flags;
+	u32	sa_handler;
+	u32	sa_mask[4];
+	int	sa_resv[2];
+};
+
+/* Solaris sigaction(): translate the signal number, flags and masks in
+ * both directions and forward to the native sigaction on kernel
+ * structs.  Solaris-only signals are faked: the old action is reported
+ * as all-zero and the call succeeds.
+ * NOTE(review): 'p' (built from @old) is also used to read the new
+ * action when @act is set -- this only works when act == old; verify
+ * against the callers. */
+asmlinkage int solaris_sigaction(int sig, u32 act, u32 old)
+{
+	u32 tmp, tmp2[4];
+	struct sigaction s, s2;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	struct sol_sigaction __user *p = (void __user *)A(old);
+	int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) = 
+		(int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
+	
+	sig = mapsig(sig); 
+	if (sig < 0) {
+		/* We cheat a little bit for Solaris only signals */
+		if (old && clear_user(p, sizeof(struct sol_sigaction)))
+			return -EFAULT;
+		return 0;
+	}
+	if (act) {
+		if (get_user (tmp, &p->sa_flags))
+			return -EFAULT;
+		s.sa_flags = 0;
+		if (tmp & SOLARIS_SA_ONSTACK) s.sa_flags |= SA_STACK;
+		if (tmp & SOLARIS_SA_RESTART) s.sa_flags |= SA_RESTART;
+		if (tmp & SOLARIS_SA_NODEFER) s.sa_flags |= SA_NOMASK;
+		if (tmp & SOLARIS_SA_RESETHAND) s.sa_flags |= SA_ONESHOT;
+		if (tmp & SOLARIS_SA_NOCLDSTOP) s.sa_flags |= SA_NOCLDSTOP;
+		if (get_user (tmp, &p->sa_handler) ||
+		    copy_from_user (tmp2, &p->sa_mask, 2*sizeof(u32)))
+			return -EFAULT;
+		s.sa_handler = (__sighandler_t)A(tmp);
+		if (mapin (tmp2, &s.sa_mask)) return -EINVAL;
+		s.sa_restorer = NULL;
+	}
+	set_fs(KERNEL_DS);
+	ret = sys_sigaction(sig, act ? (void __user *)&s : NULL,
+				 old ? (void __user *)&s2 : NULL);
+	set_fs(old_fs);
+	if (ret) return ret;
+	if (old) {
+		/* Convert the previous action back to Solaris layout. */
+		if (mapout (&s2.sa_mask, tmp2)) return -EINVAL;
+		tmp = 0; tmp2[2] = 0; tmp2[3] = 0;
+		if (s2.sa_flags & SA_STACK) tmp |= SOLARIS_SA_ONSTACK;
+		if (s2.sa_flags & SA_RESTART) tmp |= SOLARIS_SA_RESTART;
+		if (s2.sa_flags & SA_NOMASK) tmp |= SOLARIS_SA_NODEFER;
+		if (s2.sa_flags & SA_ONESHOT) tmp |= SOLARIS_SA_RESETHAND;
+		if (s2.sa_flags & SA_NOCLDSTOP) tmp |= SOLARIS_SA_NOCLDSTOP;
+		if (put_user (tmp, &p->sa_flags) ||
+		    __put_user ((u32)(unsigned long)s2.sa_handler, &p->sa_handler) ||
+		    copy_to_user (&p->sa_mask, tmp2, 4*sizeof(u32)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/* Solaris sigpending()/sigfillset() multiplexer: which == 1 returns
+ * the blocked-and-pending set, which == 2 returns a full set (of the
+ * signals that have Linux equivalents), converted to Solaris layout. */
+asmlinkage int solaris_sigpending(int which, u32 set)
+{
+	sigset_t s;
+	u32 tmp[4];
+	switch (which) {
+	case 1: /* sigpending */
+		spin_lock_irq(&current->sighand->siglock);
+		sigandsets(&s, &current->blocked, &current->pending.signal);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+		break;
+	case 2: /* sigfillset - I just set signals which have linux equivalents */
+		sigfillset(&s);
+		break;
+	default: return -EINVAL;
+	}
+	if (mapout (&s, tmp)) return -EINVAL;
+	tmp[2] = 0; tmp[3] = 0;
+	if (copy_to_user ((u32 __user *)A(set), tmp, sizeof(tmp)))
+		return -EFAULT;
+	return 0;
+}
+
+/* Solaris wait(): wait4(-1, ..., WUNTRACED) and rewrite the signal
+ * numbers embedded in the status word into Solaris numbering
+ * (termination signal in the low byte, stop signal in the second
+ * byte when the low byte is 0x7f). */
+asmlinkage int solaris_wait(u32 stat_loc)
+{
+	unsigned __user *p = (unsigned __user *)A(stat_loc);
+	int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
+		(int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+	int ret, status;
+	
+	ret = sys_wait4(-1, p, WUNTRACED, NULL);
+	if (ret >= 0 && stat_loc) {
+		if (get_user (status, p))
+			return -EFAULT;
+		if (((status - 1) & 0xffff) < 0xff)
+			status = linux_to_solaris_signals[status & 0x7f] & 0x7f;
+		else if ((status & 0xff) == 0x7f)
+			status = (linux_to_solaris_signals[(status >> 8) & 0xff] << 8) | 0x7f;
+		if (__put_user (status, p))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+/* Solaris waitid(): map the idtype/pid pair and option flags onto
+ * wait4(), then rewrite the raw wait status into a Solaris siginfo
+ * (si_code = CLD_EXITED/KILLED/DUMPED/STOPPED, translated signal
+ * numbers). */
+asmlinkage int solaris_waitid(int idtype, s32 pid, u32 info, int options)
+{
+	int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
+		(int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
+	int opts, status, ret;
+	
+	switch (idtype) {
+	case 0: /* P_PID */ break;
+	case 1: /* P_PGID */ pid = -pid; break;
+	case 7: /* P_ALL */ pid = -1; break;
+	default: return -EINVAL;
+	}
+	opts = 0;
+	if (options & SOLARIS_WUNTRACED) opts |= WUNTRACED;
+	if (options & SOLARIS_WNOHANG) opts |= WNOHANG;
+	current->state = TASK_RUNNING;
+	ret = sys_wait4(pid, (unsigned int __user *)A(info), opts, NULL);
+	if (ret < 0) return ret;
+	if (info) {
+		struct sol_siginfo __user *s = (void __user *)A(info);
+	
+		if (get_user (status, (unsigned int __user *)A(info)))
+			return -EFAULT;
+
+		if (__put_user (SOLARIS_SIGCLD, &s->si_signo) ||
+		    __put_user (ret, &s->_data._proc._pid))
+			return -EFAULT;
+
+		switch (status & 0xff) {
+		case 0: ret = SOLARIS_CLD_EXITED;
+			status = (status >> 8) & 0xff;
+			break;
+		case 0x7f:
+			status = (status >> 8) & 0xff;
+			switch (status) {
+			case SIGSTOP:
+			case SIGTSTP: ret = SOLARIS_CLD_STOPPED;
+				/* break was missing here, so the stopped
+				 * case fell through and was always
+				 * overwritten with CLD_EXITED. */
+				break;
+			default: ret = SOLARIS_CLD_EXITED;
+			}
+			status = linux_to_solaris_signals[status];
+			break;
+		default:
+			if (status & 0x80) ret = SOLARIS_CLD_DUMPED;
+			else ret = SOLARIS_CLD_KILLED;
+			status = linux_to_solaris_signals[status & 0x7f];
+			break;
+		}
+
+		if (__put_user (ret, &s->si_code) ||
+		    __put_user (status, &s->_data._proc._pdata._cld._status))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+extern int svr4_setcontext(svr4_ucontext_t *c, struct pt_regs *regs);
+extern int svr4_getcontext(svr4_ucontext_t *c, struct pt_regs *regs);
+
+/* Solaris [gs]etcontext() multiplexer: dispatch on the first syscall
+ * argument to the shared SVR4 context helpers. */
+asmlinkage int solaris_context(struct pt_regs *regs)
+{
+	switch ((unsigned)regs->u_regs[UREG_I0]) {
+	case 0: /* getcontext */
+		return svr4_getcontext((svr4_ucontext_t *)(long)(u32)regs->u_regs[UREG_I1], regs);
+	case 1: /* setcontext */
+		return svr4_setcontext((svr4_ucontext_t *)(long)(u32)regs->u_regs[UREG_I1], regs);
+	default:
+		return -EINVAL;
+
+	}
+}
+
+/* Solaris sigaltstack(): stubbed out -- pretends to succeed without
+ * doing anything. */
+asmlinkage int solaris_sigaltstack(u32 ss, u32 oss)
+{
+/* XXX Implement this soon */
+	return 0;
+}
diff --git a/arch/sparc64/solaris/signal.h b/arch/sparc64/solaris/signal.h
new file mode 100644
index 0000000..e915708
--- /dev/null
+++ b/arch/sparc64/solaris/signal.h
@@ -0,0 +1,108 @@
+/* $Id: signal.h,v 1.3 1998/04/12 06:20:33 davem Exp $
+ * signal.h: Signal emulation for Solaris
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+    
+#define SOLARIS_SIGHUP		1
+#define SOLARIS_SIGINT		2
+#define SOLARIS_SIGQUIT		3
+#define SOLARIS_SIGILL		4
+#define SOLARIS_SIGTRAP		5
+#define SOLARIS_SIGIOT		6
+#define SOLARIS_SIGEMT		7
+#define SOLARIS_SIGFPE		8
+#define SOLARIS_SIGKILL		9
+#define SOLARIS_SIGBUS		10
+#define SOLARIS_SIGSEGV		11
+#define SOLARIS_SIGSYS		12
+#define SOLARIS_SIGPIPE		13
+#define SOLARIS_SIGALRM		14
+#define SOLARIS_SIGTERM		15
+#define SOLARIS_SIGUSR1		16
+#define SOLARIS_SIGUSR2		17
+#define SOLARIS_SIGCLD		18
+#define SOLARIS_SIGPWR		19
+#define SOLARIS_SIGWINCH	20
+#define SOLARIS_SIGURG		21
+#define SOLARIS_SIGPOLL		22
+#define SOLARIS_SIGSTOP		23
+#define SOLARIS_SIGTSTP		24
+#define SOLARIS_SIGCONT		25
+#define SOLARIS_SIGTTIN		26
+#define SOLARIS_SIGTTOU		27
+#define SOLARIS_SIGVTALRM	28
+#define SOLARIS_SIGPROF		29
+#define SOLARIS_SIGXCPU		30
+#define SOLARIS_SIGXFSZ		31
+#define SOLARIS_SIGWAITING	32
+#define SOLARIS_SIGLWP		33
+#define SOLARIS_SIGFREEZE	34
+#define SOLARIS_SIGTHAW		35
+#define SOLARIS_SIGCANCEL	36
+#define SOLARIS_SIGRTMIN	37
+#define SOLARIS_SIGRTMAX	44
+#define SOLARIS_NSIGNALS	44
+
+
+#define SOLARIS_SA_ONSTACK	1
+#define SOLARIS_SA_RESETHAND	2
+#define SOLARIS_SA_RESTART	4
+#define SOLARIS_SA_SIGINFO	8
+#define SOLARIS_SA_NODEFER	16
+#define SOLARIS_SA_NOCLDWAIT	0x10000
+#define SOLARIS_SA_NOCLDSTOP	0x20000
+
/* 32-bit Solaris siginfo_t as written back to user space by the wait
 * emulation above.  The pad member forces the union — and therefore the
 * whole structure — to the fixed 128-byte Solaris size. */
struct sol_siginfo {
	int	si_signo;
	int	si_code;
	int	si_errno;
	union	{
		char	pad[128-3*sizeof(int)];
		struct { /* SIGCLD-style process info (see wait emulation) */
			s32	_pid;
			union {
				struct {
					s32	_uid;
					s32	_value;
				} _kill;
				struct {
					s32	_utime;
					int	_status;
					s32	_stime;
				} _cld;
			} _pdata;
		} _proc;
		struct { /* SIGSEGV, SIGBUS, SIGILL and SIGFPE */
			u32	_addr;
			int	_trapno;
		} _fault;
		struct { /* SIGPOLL, SIGXFSZ */
			int	_fd;
			s32	_band;
		} _file;
	} _data;
};
+
+#define SOLARIS_WUNTRACED	0x04
+#define SOLARIS_WNOHANG		0x40
+#define SOLARIS_WEXITED         0x01
+#define SOLARIS_WTRAPPED        0x02
+#define SOLARIS_WSTOPPED        WUNTRACED
+#define SOLARIS_WCONTINUED      0x08
+#define SOLARIS_WNOWAIT         0x80
+
+#define SOLARIS_TRAP_BRKPT      1
+#define SOLARIS_TRAP_TRACE      2
+#define SOLARIS_CLD_EXITED      1
+#define SOLARIS_CLD_KILLED      2
+#define SOLARIS_CLD_DUMPED      3
+#define SOLARIS_CLD_TRAPPED     4
+#define SOLARIS_CLD_STOPPED     5
+#define SOLARIS_CLD_CONTINUED   6
+#define SOLARIS_POLL_IN         1
+#define SOLARIS_POLL_OUT        2
+#define SOLARIS_POLL_MSG        3
+#define SOLARIS_POLL_ERR        4
+#define SOLARIS_POLL_PRI        5
+#define SOLARIS_POLL_HUP        6
diff --git a/arch/sparc64/solaris/socket.c b/arch/sparc64/solaris/socket.c
new file mode 100644
index 0000000..ec8e074
--- /dev/null
+++ b/arch/sparc64/solaris/socket.c
@@ -0,0 +1,415 @@
+/* $Id: socket.c,v 1.6 2002/02/08 03:57:14 davem Exp $
+ * socket.c: Socket syscall emulation for Solaris 2.6+
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ *
+ * 1999-08-19 Fixed socketpair code 
+ *            Jason Rappleye (rappleye@ccr.buffalo.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/compat.h>
+#include <net/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+
+#include "conv.h"
+
+#define SOCK_SOL_STREAM		2
+#define SOCK_SOL_DGRAM		1
+#define SOCK_SOL_RAW		4
+#define SOCK_SOL_RDM		5
+#define SOCK_SOL_SEQPACKET	6
+
+#define SOL_SO_SNDLOWAT		0x1003
+#define SOL_SO_RCVLOWAT		0x1004
+#define SOL_SO_SNDTIMEO		0x1005
+#define SOL_SO_RCVTIMEO		0x1006
+#define SOL_SO_STATE		0x2000
+
+#define SOL_SS_NDELAY		0x040
+#define SOL_SS_NONBLOCK		0x080
+#define SOL_SS_ASYNC		0x100
+
+#define SO_STATE		0x000e
+
+static int socket_check(int family, int type)
+{
+	if (family != PF_UNIX && family != PF_INET)
+		return -ESOCKTNOSUPPORT;
+	switch (type) {
+	case SOCK_SOL_STREAM: type = SOCK_STREAM; break;
+	case SOCK_SOL_DGRAM: type = SOCK_DGRAM; break;
+	case SOCK_SOL_RAW: type = SOCK_RAW; break;
+	case SOCK_SOL_RDM: type = SOCK_RDM; break;
+	case SOCK_SOL_SEQPACKET: type = SOCK_SEQPACKET; break;
+	default: return -EINVAL;
+	}
+	return type;
+}
+
+static int solaris_to_linux_sockopt(int optname) 
+{
+	switch (optname) {
+	case SOL_SO_SNDLOWAT: optname = SO_SNDLOWAT; break;
+	case SOL_SO_RCVLOWAT: optname = SO_RCVLOWAT; break;
+	case SOL_SO_SNDTIMEO: optname = SO_SNDTIMEO; break;
+	case SOL_SO_RCVTIMEO: optname = SO_RCVTIMEO; break;
+	case SOL_SO_STATE: optname = SO_STATE; break;
+	};
+	
+	return optname;
+}
+	
+asmlinkage int solaris_socket(int family, int type, int protocol)
+{
+	int (*sys_socket)(int, int, int) =
+		(int (*)(int, int, int))SYS(socket);
+
+	type = socket_check (family, type);
+	if (type < 0) return type;
+	return sys_socket(family, type, protocol);
+}
+
+asmlinkage int solaris_socketpair(int *usockvec)
+{
+	int (*sys_socketpair)(int, int, int, int *) =
+		(int (*)(int, int, int, int *))SYS(socketpair);
+
+	/* solaris socketpair really only takes one arg at the syscall
+	 * level, int * usockvec. The libs apparently take care of 
+	 * making sure that family==AF_UNIX and type==SOCK_STREAM. The 
+	 * pointer we really want ends up residing in the first (and
+	 * supposedly only) argument.
+	 */
+
+	return sys_socketpair(AF_UNIX, SOCK_STREAM, 0, (int *)usockvec);
+}
+
+asmlinkage int solaris_bind(int fd, struct sockaddr *addr, int addrlen)
+{
+	int (*sys_bind)(int, struct sockaddr *, int) =
+		(int (*)(int, struct sockaddr *, int))SUNOS(104);
+
+	return sys_bind(fd, addr, addrlen);
+}
+
+asmlinkage int solaris_setsockopt(int fd, int level, int optname, u32 optval, int optlen)
+{
+	int (*sunos_setsockopt)(int, int, int, u32, int) =
+		(int (*)(int, int, int, u32, int))SUNOS(105);
+
+	optname = solaris_to_linux_sockopt(optname);
+	if (optname < 0)
+		return optname;
+	if (optname == SO_STATE)
+		return 0;
+
+	return sunos_setsockopt(fd, level, optname, optval, optlen);
+}
+
+asmlinkage int solaris_getsockopt(int fd, int level, int optname, u32 optval, u32 optlen)
+{
+	int (*sunos_getsockopt)(int, int, int, u32, u32) =
+		(int (*)(int, int, int, u32, u32))SUNOS(118);
+
+	optname = solaris_to_linux_sockopt(optname);
+	if (optname < 0)
+		return optname;
+
+	if (optname == SO_STATE)
+		optname = SOL_SO_STATE;
+
+	return sunos_getsockopt(fd, level, optname, optval, optlen);
+}
+
+asmlinkage int solaris_connect(int fd, struct sockaddr __user *addr, int addrlen)
+{
+	int (*sys_connect)(int, struct sockaddr __user *, int) =
+		(int (*)(int, struct sockaddr __user *, int))SYS(connect);
+
+	return sys_connect(fd, addr, addrlen);
+}
+
+asmlinkage int solaris_accept(int fd, struct sockaddr __user *addr, int __user *addrlen)
+{
+	int (*sys_accept)(int, struct sockaddr __user *, int __user *) =
+		(int (*)(int, struct sockaddr __user *, int __user *))SYS(accept);
+
+	return sys_accept(fd, addr, addrlen);
+}
+
+asmlinkage int solaris_listen(int fd, int backlog)
+{
+	int (*sys_listen)(int, int) =
+		(int (*)(int, int))SUNOS(106);
+
+	return sys_listen(fd, backlog);
+}
+
+asmlinkage int solaris_shutdown(int fd, int how)
+{
+	int (*sys_shutdown)(int, int) =
+		(int (*)(int, int))SYS(shutdown);
+
+	return sys_shutdown(fd, how);
+}
+
+#define MSG_SOL_OOB		0x1
+#define MSG_SOL_PEEK		0x2
+#define MSG_SOL_DONTROUTE	0x4
+#define MSG_SOL_EOR		0x8
+#define MSG_SOL_CTRUNC		0x10
+#define MSG_SOL_TRUNC		0x20
+#define MSG_SOL_WAITALL		0x40
+#define MSG_SOL_DONTWAIT	0x80
+
+static int solaris_to_linux_msgflags(int flags)
+{
+	int fl = flags & (MSG_OOB|MSG_PEEK|MSG_DONTROUTE);
+	
+	if (flags & MSG_SOL_EOR) fl |= MSG_EOR;
+	if (flags & MSG_SOL_CTRUNC) fl |= MSG_CTRUNC;
+	if (flags & MSG_SOL_TRUNC) fl |= MSG_TRUNC;
+	if (flags & MSG_SOL_WAITALL) fl |= MSG_WAITALL;
+	if (flags & MSG_SOL_DONTWAIT) fl |= MSG_DONTWAIT;
+	return fl;
+}
+
+static int linux_to_solaris_msgflags(int flags)
+{
+	int fl = flags & (MSG_OOB|MSG_PEEK|MSG_DONTROUTE);
+	
+	if (flags & MSG_EOR) fl |= MSG_SOL_EOR;
+	if (flags & MSG_CTRUNC) fl |= MSG_SOL_CTRUNC;
+	if (flags & MSG_TRUNC) fl |= MSG_SOL_TRUNC;
+	if (flags & MSG_WAITALL) fl |= MSG_SOL_WAITALL;
+	if (flags & MSG_DONTWAIT) fl |= MSG_SOL_DONTWAIT;
+	return fl;
+}
+
+asmlinkage int solaris_recvfrom(int s, char __user *buf, int len, int flags, u32 from, u32 fromlen)
+{
+	int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+		(int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+	
+	return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), A(from), A(fromlen));
+}
+
+asmlinkage int solaris_recv(int s, char __user *buf, int len, int flags)
+{
+	int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+		(int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+	
+	return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
+}
+
+asmlinkage int solaris_sendto(int s, char __user *buf, int len, int flags, u32 to, u32 tolen)
+{
+	int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+		(int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(sendto);
+	
+	return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), A(to), A(tolen));
+}
+
+asmlinkage int solaris_send(int s, char *buf, int len, int flags)
+{
+	int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
+		(int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(sendto);
+	
+	return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
+}
+
+asmlinkage int solaris_getpeername(int fd, struct sockaddr *addr, int *addrlen)
+{
+	int (*sys_getpeername)(int, struct sockaddr *, int *) =
+		(int (*)(int, struct sockaddr *, int *))SYS(getpeername);
+
+	return sys_getpeername(fd, addr, addrlen);
+}
+
+asmlinkage int solaris_getsockname(int fd, struct sockaddr *addr, int *addrlen)
+{
+	int (*sys_getsockname)(int, struct sockaddr *, int *) =
+		(int (*)(int, struct sockaddr *, int *))SYS(getsockname);
+
+	return sys_getsockname(fd, addr, addrlen);
+}
+
+/* XXX This really belongs in some header file... -DaveM */
+#define MAX_SOCK_ADDR	128		/* 108 for Unix domain - 
+					   16 for IP, 16 for IPX,
+					   24 for IPv6,
+					   about 80 for AX.25 */
+
/* 32-bit (Solaris user) layout of struct msghdr; the pointer members
 * are 32-bit user addresses, widened with A() by
 * msghdr_from_user32_to_kern() below. */
struct sol_nmsghdr {
	u32		msg_name;
	int		msg_namelen;
	u32		msg_iov;
	u32		msg_iovlen;
	u32		msg_control;
	u32		msg_controllen;
	u32		msg_flags;
};
+
/* 32-bit layout of a control message header; cmsg_data is the trailing
 * variable-length payload. */
struct sol_cmsghdr {
	u32		cmsg_len;
	int		cmsg_level;
	int		cmsg_type;
	unsigned char	cmsg_data[0];
};
+
/*
 * Expand a 32-bit Solaris msghdr from userspace into the kernel's
 * native struct msghdr.
 *
 * The three embedded user pointers (msg_name, msg_iov, msg_control) are
 * fetched as 32-bit values and widened via A(); msg_namelen,
 * msg_controllen and msg_flags are copied directly, after which
 * msg_flags is translated from Solaris to Linux bits.
 *
 * Returns 0 on success; on a faulting read of the pointer fields it
 * returns -EFAULT, and for the scalar fields it returns the OR of the
 * get_user() results — nonzero but not necessarily exactly -EFAULT.
 * Callers (solaris_sendmsg/recvmsg) only test for nonzero.
 */
static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
					     struct sol_nmsghdr __user *umsg)
{
	u32 tmp1, tmp2, tmp3;
	int err;

	err = get_user(tmp1, &umsg->msg_name);
	err |= __get_user(tmp2, &umsg->msg_iov);
	err |= __get_user(tmp3, &umsg->msg_control);
	if (err)
		return -EFAULT;

	/* Widen the 32-bit user addresses into 64-bit user pointers. */
	kmsg->msg_name = A(tmp1);
	kmsg->msg_iov = A(tmp2);
	kmsg->msg_control = A(tmp3);

	err = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
	err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
	err |= get_user(kmsg->msg_flags, &umsg->msg_flags);
	
	kmsg->msg_flags = solaris_to_linux_msgflags(kmsg->msg_flags);
	
	return err;
}
+
+asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned user_flags)
+{
+	struct socket *sock;
+	char address[MAX_SOCK_ADDR];
+	struct iovec iov[UIO_FASTIOV];
+	unsigned char ctl[sizeof(struct cmsghdr) + 20];
+	unsigned char *ctl_buf = ctl;
+	struct msghdr kern_msg;
+	int err, total_len;
+
+	if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
+		return -EFAULT;
+	if(kern_msg.msg_iovlen > UIO_MAXIOV)
+		return -EINVAL;
+	err = verify_compat_iovec(&kern_msg, iov, address, VERIFY_READ);
+	if (err < 0)
+		goto out;
+	total_len = err;
+
+	if(kern_msg.msg_controllen) {
+		struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
+		unsigned long *kcmsg;
+		compat_size_t cmlen;
+
+		if(kern_msg.msg_controllen > sizeof(ctl) &&
+		   kern_msg.msg_controllen <= 256) {
+			err = -ENOBUFS;
+			ctl_buf = kmalloc(kern_msg.msg_controllen, GFP_KERNEL);
+			if(!ctl_buf)
+				goto out_freeiov;
+		}
+		__get_user(cmlen, &ucmsg->cmsg_len);
+		kcmsg = (unsigned long *) ctl_buf;
+		*kcmsg++ = (unsigned long)cmlen;
+		err = -EFAULT;
+		if(copy_from_user(kcmsg, &ucmsg->cmsg_level,
+				  kern_msg.msg_controllen - sizeof(compat_size_t)))
+			goto out_freectl;
+		kern_msg.msg_control = ctl_buf;
+	}
+	kern_msg.msg_flags = solaris_to_linux_msgflags(user_flags);
+
+	lock_kernel();
+	sock = sockfd_lookup(fd, &err);
+	if (sock != NULL) {
+		if (sock->file->f_flags & O_NONBLOCK)
+			kern_msg.msg_flags |= MSG_DONTWAIT;
+		err = sock_sendmsg(sock, &kern_msg, total_len);
+		sockfd_put(sock);
+	}
+	unlock_kernel();
+
+out_freectl:
+	/* N.B. Use kfree here, as kern_msg.msg_controllen might change? */
+	if(ctl_buf != ctl)
+		kfree(ctl_buf);
+out_freeiov:
+	if(kern_msg.msg_iov != iov)
+		kfree(kern_msg.msg_iov);
+out:
+	return err;
+}
+
/*
 * recvmsg(2) emulation for Solaris.  Expands the 32-bit Solaris msghdr,
 * receives into the caller's iovec, then writes the translated result
 * flags and consumed control length back into the user's header.
 *
 * Returns the number of bytes received, or a negative errno.
 *
 * NOTE(review): user_flags is handed to sock_recvmsg() without going
 * through solaris_to_linux_msgflags(), unlike solaris_sendmsg() above —
 * verify whether that is intentional.
 */
asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct msghdr kern_msg;
	char addr[MAX_SOCK_ADDR];
	struct socket *sock;
	struct iovec *iov = iovstack;
	struct sockaddr __user *uaddr;
	int __user *uaddr_len;
	unsigned long cmsg_ptr;
	int err, total_len, len = 0;

	if(msghdr_from_user32_to_kern(&kern_msg, user_msg))
		return -EFAULT;
	if(kern_msg.msg_iovlen > UIO_MAXIOV)
		return -EINVAL;

	/* Source-address buffer supplied by the user; its length is
	 * written back into the user header itself. */
	uaddr = kern_msg.msg_name;
	uaddr_len = &user_msg->msg_namelen;
	err = verify_compat_iovec(&kern_msg, iov, addr, VERIFY_WRITE);
	if (err < 0)
		goto out;
	total_len = err;

	/* Remember where the control buffer started so the amount
	 * consumed can be reported afterwards. */
	cmsg_ptr = (unsigned long) kern_msg.msg_control;
	kern_msg.msg_flags = 0;

	lock_kernel();
	sock = sockfd_lookup(fd, &err);
	if (sock != NULL) {
		if (sock->file->f_flags & O_NONBLOCK)
			user_flags |= MSG_DONTWAIT;
		err = sock_recvmsg(sock, &kern_msg, total_len, user_flags);
		if(err >= 0)
			len = err;
		sockfd_put(sock);
	}
	unlock_kernel();

	if(uaddr != NULL && err >= 0)
		err = move_addr_to_user(addr, kern_msg.msg_namelen, uaddr, uaddr_len);
	if(err >= 0) {
		err = __put_user(linux_to_solaris_msgflags(kern_msg.msg_flags), &user_msg->msg_flags);
		if(!err) {
			/* XXX Convert cmsg back into userspace 32-bit format... */
			err = __put_user((unsigned long)kern_msg.msg_control - cmsg_ptr,
					 &user_msg->msg_controllen);
		}
	}

	if(kern_msg.msg_iov != iov)
		kfree(kern_msg.msg_iov);
out:
	if(err < 0)
		return err;
	return len;
}
diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c
new file mode 100644
index 0000000..d7c1c76
--- /dev/null
+++ b/arch/sparc64/solaris/socksys.c
@@ -0,0 +1,211 @@
+/* $Id: socksys.c,v 1.21 2002/02/08 03:57:14 davem Exp $
+ * socksys.c: /dev/inet/ stuff for Solaris emulation.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997, 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
+ * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
+ */
+
+/*
+ *  Dave, _please_ give me specifications on this fscking mess so that I
+ * could at least get it into the state when it wouldn't screw the rest of
+ * the kernel over.  socksys.c and timod.c _stink_ and we are not talking
+ * H2S here, it's isopropilmercaptan in concentrations way over LD50. -- AV
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/in.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+#include <asm/termios.h>
+
+#include "conv.h"
+#include "socksys.h"
+
/* Maps the low nibble of a /dev/inet minor number to an IP protocol
 * (see socksys_open()).  The first two entries are both ICMP; trailing
 * zero entries are unused minors. */
static int af_inet_protocols[] = {
IPPROTO_ICMP, IPPROTO_ICMP, IPPROTO_IGMP, IPPROTO_IPIP, IPPROTO_TCP,
IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW,
0, 0, 0, 0, 0, 0,
};
+
+#ifndef DEBUG_SOLARIS_KMALLOC
+
+#define mykmalloc kmalloc
+#define mykfree kfree
+
+#else
+
+extern void * mykmalloc(size_t s, int gfp);
+extern void mykfree(void *);
+
+#endif
+
/* Saved ->poll of the generic socket file_operations; invoked from
 * socksys_poll() after the queued-STREAMS-message check. */
static unsigned int (*sock_poll)(struct file *, poll_table *);

/* Filled in by init_socksys() with a copy of the socket f_ops, with
 * poll and release overridden; installed on files by socksys_open(). */
static struct file_operations socksys_file_ops = {
	/* Currently empty */
};
+
/*
 * open() handler for the socksys character device.
 *
 * The minor number encodes the socket to create: the high nibble is the
 * address family, and for AF_INET the low nibble indexes
 * af_inet_protocols[] (TCP/UDP map to stream/dgram sockets, everything
 * else to raw).  A real socket fd is created, the open file is
 * re-pointed at the socket's dentry, and a sol_socket_struct is hung
 * off filp->private_data to carry the STREAMS emulation state.
 */
static int socksys_open(struct inode * inode, struct file * filp)
{
	int family, type, protocol, fd;
	struct dentry *dentry;
	int (*sys_socket)(int,int,int) =
		(int (*)(int,int,int))SUNOS(97);
        struct sol_socket_struct * sock;
	
	/* High nibble of the minor selects the address family. */
	family = ((iminor(inode) >> 4) & 0xf);
	switch (family) {
	case AF_UNIX:
		type = SOCK_STREAM;
		protocol = 0;
		break;
	case AF_INET:
		/* Low nibble selects the IP protocol. */
		protocol = af_inet_protocols[iminor(inode) & 0xf];
		switch (protocol) {
		case IPPROTO_TCP: type = SOCK_STREAM; break;
		case IPPROTO_UDP: type = SOCK_DGRAM; break;
		default: type = SOCK_RAW; break;
		}
		break;
	default:
		type = SOCK_RAW;
		protocol = 0;
		break;
	}

	fd = sys_socket(family, type, protocol);
	if (fd < 0)
		return fd;
	/*
	 * N.B. The following operations are not legal!
	 *
	 * No shit.  WTF is it supposed to do, anyway?
	 *
	 * Try instead:
	 * d_delete(filp->f_dentry), then d_instantiate with sock inode
	 */
	dentry = filp->f_dentry;
	filp->f_dentry = dget(fcheck(fd)->f_dentry);
	filp->f_dentry->d_inode->i_rdev = inode->i_rdev;
	filp->f_dentry->d_inode->i_flock = inode->i_flock;
	SOCKET_I(filp->f_dentry->d_inode)->file = filp;
	filp->f_op = &socksys_file_ops;
        sock = (struct sol_socket_struct*) 
        	mykmalloc(sizeof(struct sol_socket_struct), GFP_KERNEL);
        /* NOTE(review): this error path leaks the socket fd and the
         * original dentry reference taken above — confirm and fix. */
        if (!sock) return -ENOMEM;
	SOLDD(("sock=%016lx(%016lx)\n", sock, filp));
        sock->magic = SOLARIS_SOCKET_MAGIC;
        sock->modcount = 0;
        sock->state = TS_UNBND;	/* fresh TPI endpoint starts unbound */
        sock->offset = 0;
        sock->pfirst = sock->plast = NULL;
        filp->private_data = sock;
	SOLDD(("filp->private_data %016lx\n", filp->private_data));

	/* The descriptor-table slot is no longer needed; the struct
	 * file now holds the socket dentry directly. */
	sys_close(fd);
	dput(dentry);
	return 0;
}
+
+static int socksys_release(struct inode * inode, struct file * filp)
+{
+        struct sol_socket_struct * sock;
+        struct T_primsg *it;
+
+	/* XXX: check this */
+	sock = (struct sol_socket_struct *)filp->private_data;
+	SOLDD(("sock release %016lx(%016lx)\n", sock, filp));
+	it = sock->pfirst;
+	while (it) {
+		struct T_primsg *next = it->next;
+		
+		SOLDD(("socksys_release %016lx->%016lx\n", it, next));
+		mykfree((char*)it);
+		it = next;
+	}
+	filp->private_data = NULL;
+	SOLDD(("socksys_release %016lx\n", sock));
+	mykfree((char*)sock);
+	return 0;
+}
+
+static unsigned int socksys_poll(struct file * filp, poll_table * wait)
+{
+	struct inode *ino;
+	unsigned int mask = 0;
+
+	ino=filp->f_dentry->d_inode;
+	if (ino && S_ISSOCK(ino->i_mode)) {
+		struct sol_socket_struct *sock;
+		sock = (struct sol_socket_struct*)filp->private_data;
+		if (sock && sock->pfirst) {
+			mask |= POLLIN | POLLRDNORM;
+			if (sock->pfirst->pri == MSG_HIPRI)
+				mask |= POLLPRI;
+		}
+	}
+	if (sock_poll)
+		mask |= (*sock_poll)(filp, wait);
+	return mask;
+}
+	
/* fops for the "socksys" character device (major 30); open() creates
 * the real socket and re-points the file at it. */
static struct file_operations socksys_fops = {
	.open =		socksys_open,
	.release =	socksys_release,
};
+
+int __init
+init_socksys(void)
+{
+	int ret;
+	struct file * file;
+	int (*sys_socket)(int,int,int) =
+		(int (*)(int,int,int))SUNOS(97);
+	int (*sys_close)(unsigned int) = 
+		(int (*)(unsigned int))SYS(close);
+	
+	ret = register_chrdev (30, "socksys", &socksys_fops);
+	if (ret < 0) {
+		printk ("Couldn't register socksys character device\n");
+		return ret;
+	}
+	ret = sys_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	if (ret < 0) {
+		printk ("Couldn't create socket\n");
+		return ret;
+	}
+
+	devfs_mk_cdev(MKDEV(30, 0), S_IFCHR|S_IRUSR|S_IWUSR, "socksys");
+
+	file = fcheck(ret);
+	/* N.B. Is this valid? Suppose the f_ops are in a module ... */
+	socksys_file_ops = *file->f_op;
+	sys_close(ret);
+	sock_poll = socksys_file_ops.poll;
+	socksys_file_ops.poll = socksys_poll;
+	socksys_file_ops.release = socksys_release;
+	return 0;
+}
+
/* Module teardown: undo init_socksys() in reverse order. */
void
cleanup_socksys(void)
{
	if (unregister_chrdev(30, "socksys") != 0)
		printk ("Couldn't unregister socksys character device\n");
	devfs_remove ("socksys");
}
diff --git a/arch/sparc64/solaris/socksys.h b/arch/sparc64/solaris/socksys.h
new file mode 100644
index 0000000..5d1b78e
--- /dev/null
+++ b/arch/sparc64/solaris/socksys.h
@@ -0,0 +1,208 @@
+/* $Id: socksys.h,v 1.2 1998/03/26 08:46:07 jj Exp $
+ * socksys.h: Definitions for STREAMS modules emulation code.
+ *
+ * Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
+ */
+
+#define MSG_HIPRI	0x01
+#define MSG_ANY		0x02
+#define MSG_BAND	0x04
+
+#define MORECTL		1
+#define MOREDATA	2
+
+#define	TBADADDR		1
+#define	TBADOPT			2
+#define	TACCES			3
+#define TBADF			4
+#define TNOADDR			5
+#define TOUTSTATE	        6
+#define TBADSEQ		        7
+#define TSYSERR			8
+#define TLOOK		        9
+#define TBADDATA	       10
+#define TBUFOVFLW	       11
+#define TFLOW		       12
+#define	TNODATA		       13
+#define TNODIS		       14
+#define TNOUDERR	       15
+#define TBADFLAG	       16
+#define TNOREL		       17
+#define TNOTSUPPORT	       18
+#define TSTATECHNG	       19
+
+#define T_CONN_REQ      0
+#define T_CONN_RES      1
+#define T_DISCON_REQ    2
+#define T_DATA_REQ      3
+#define T_EXDATA_REQ    4
+#define T_INFO_REQ      5
+#define T_BIND_REQ      6
+#define T_UNBIND_REQ    7
+#define T_UNITDATA_REQ  8
+#define T_OPTMGMT_REQ   9
+#define T_ORDREL_REQ    10
+
+#define T_CONN_IND      11
+#define T_CONN_CON      12
+#define T_DISCON_IND    13
+#define T_DATA_IND      14
+#define T_EXDATA_IND    15
+#define T_INFO_ACK      16
+#define T_BIND_ACK      17
+#define T_ERROR_ACK     18
+#define T_OK_ACK        19
+#define T_UNITDATA_IND  20
+#define T_UDERROR_IND   21
+#define T_OPTMGMT_ACK   22
+#define T_ORDREL_IND    23
+
+#define T_NEGOTIATE	0x0004
+#define T_FAILURE	0x0040
+
+#define TS_UNBND	0	/* unbound */
+#define	TS_WACK_BREQ	1	/* waiting for T_BIND_REQ ack  */
+#define TS_WACK_UREQ	2	/* waiting for T_UNBIND_REQ ack */
+#define TS_IDLE		3	/* idle */
+#define TS_WACK_OPTREQ	4	/* waiting for T_OPTMGMT_REQ ack */
+#define TS_WACK_CREQ	5	/* waiting for T_CONN_REQ ack */
+#define TS_WCON_CREQ	6	/* waiting for T_CONN_REQ confirmation */
+#define	TS_WRES_CIND	7	/* waiting for T_CONN_IND */
+#define TS_WACK_CRES	8	/* waiting for T_CONN_RES ack */
+#define TS_DATA_XFER	9	/* data transfer */
+#define TS_WIND_ORDREL	10	/* releasing read but not write */
+#define TS_WREQ_ORDREL	11      /* wait to release write but not read */
+#define TS_WACK_DREQ6	12	/* waiting for T_DISCON_REQ ack */
+#define TS_WACK_DREQ7	13	/* waiting for T_DISCON_REQ ack */
+#define TS_WACK_DREQ9	14	/* waiting for T_DISCON_REQ ack */
+#define TS_WACK_DREQ10	15	/* waiting for T_DISCON_REQ ack */
+#define TS_WACK_DREQ11	16	/* waiting for T_DISCON_REQ ack */
+#define TS_NOSTATES	17
+
/*
 * TPI (Transport Provider Interface) primitives, 32-bit layout.
 * Each *_length/*_offset pair locates variable-length data that follows
 * the fixed header within the STREAMS message block.
 */

/* T_CONN_REQ: outgoing connection request (destination + options). */
struct T_conn_req {
	s32 PRIM_type; 
	s32 DEST_length;
	s32 DEST_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_BIND_REQ: bind to a local address; CONIND_number is presumably the
 * maximum pending connect indications (listen backlog). */
struct T_bind_req {
	s32 PRIM_type;
	s32 ADDR_length;
	s32 ADDR_offset;
	u32 CONIND_number;
};

/* T_UNITDATA_REQ: connectionless data send. */
struct T_unitdata_req {
	s32 PRIM_type; 
	s32 DEST_length;
	s32 DEST_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_OPTMGMT_REQ: option management (MGMT_flags, e.g. T_NEGOTIATE). */
struct T_optmgmt_req {
	s32 PRIM_type; 
	s32 OPT_length;
	s32 OPT_offset;
	s32 MGMT_flags;
};

/* T_BIND_ACK: acknowledges T_BIND_REQ with the bound address. */
struct T_bind_ack {
	s32 PRIM_type;
	s32 ADDR_length;
	s32 ADDR_offset;
	u32 CONIND_number;
};

/* T_ERROR_ACK: negative acknowledgement; carries both a TLI error
 * (the T* constants above) and a UNIX errno. */
struct T_error_ack {
	s32 PRIM_type;
	s32 ERROR_prim;
	s32 TLI_error;
	s32 UNIX_error;
};

/* T_OK_ACK: positive acknowledgement of the primitive in CORRECT_prim. */
struct T_ok_ack {
	s32 PRIM_type;
	s32 CORRECT_prim;
};
+
/* T_CONN_IND: incoming connection indication. */
struct T_conn_ind {
	s32 PRIM_type;
	s32 SRC_length;
	s32 SRC_offset;
	s32 OPT_length;
	s32 OPT_offset;
	s32 SEQ_number;
};

/* T_CONN_CON: connection confirmation. */
struct T_conn_con {
	s32 PRIM_type;
	s32 RES_length;
	s32 RES_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_DISCON_IND: disconnect indication. */
struct T_discon_ind {
	s32 PRIM_type;
	s32 DISCON_reason;
	s32 SEQ_number;
};

/* T_UNITDATA_IND: connectionless data arrival. */
struct T_unitdata_ind {
	s32 PRIM_type;
	s32 SRC_length;
	s32 SRC_offset;
	s32 OPT_length;
	s32 OPT_offset;
};

/* T_OPTMGMT_ACK: reply to T_OPTMGMT_REQ. */
struct T_optmgmt_ack {
	s32 PRIM_type; 
	s32 OPT_length;
	s32 OPT_offset;
	s32 MGMT_flags;
};

/* Option header used inside the OPT_* areas; value[] is the trailing
 * variable-length option payload. */
struct opthdr {
	s32 level;
	s32 name;
	s32 len;
	char value[0];	
};

/* A queued TPI message, linked on sol_socket_struct.pfirst/plast;
 * pri marks MSG_HIPRI traffic (checked by socksys_poll()). */
struct T_primsg {
	struct T_primsg *next;
	unsigned char pri;
	unsigned char band;
	int length;
	s32 type;
};

/* STREAMS strbuf with a 32-bit user buffer pointer. */
struct strbuf {
	s32 maxlen;
	s32 len;
	u32 buf;
} ;
+
+/* Constants used by STREAMS modules emulation code */
+
+typedef char sol_module;
+
+#define MAX_NR_STREAM_MODULES   16
+
+/* Private data structure assigned to sockets. */
+
/* Per-file emulation state attached to a socksys socket through
 * filp->private_data (allocated in socksys_open(), freed in
 * socksys_release()). */
struct sol_socket_struct {
        int magic;		/* SOLARIS_SOCKET_MAGIC when valid */
        int modcount;		/* presumably count of pushed STREAMS modules */
        sol_module module[MAX_NR_STREAM_MODULES];
        long state;		/* TPI state (TS_*); starts at TS_UNBND */
        int offset;
        struct T_primsg *pfirst, *plast;	/* queued TPI message list */
};
+
+#define SOLARIS_SOCKET_MAGIC    0xADDED
+
diff --git a/arch/sparc64/solaris/systbl.S b/arch/sparc64/solaris/systbl.S
new file mode 100644
index 0000000..d25667e
--- /dev/null
+++ b/arch/sparc64/solaris/systbl.S
@@ -0,0 +1,314 @@
+/* $Id: systbl.S,v 1.11 2000/03/13 21:57:35 davem Exp $
+ * systbl.S: System call entry point table for Solaris compatibility.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <asm/unistd.h>
+
+/* Fall back to sys_call_table32 entry */
+/* CHAIN(x) stores the Linux 32-bit syscall *number* in the table
+ * instead of a handler address; presumably the Solaris entry code
+ * treats small values as indices into sys_call_table32 -- confirm in
+ * the entry code. */
+#define CHAIN(name)	__NR_##name
+
+/* Pass pt_regs pointer as first argument */
+/* REGS(x) tags the handler address by adding 1 (odd address);
+ * presumably the dispatcher strips the bit -- confirm in entry code. */
+#define REGS(name)	name+1
+
+/* Hack till all be implemented */
+/* Not yet implemented STREAMS-ish syscalls alias the catch-all stub. */
+#define solaris_getpmsg		solaris_unimplemented
+#define solaris_hrtsys		solaris_unimplemented
+#define solaris_msgsys		solaris_unimplemented
+#define solaris_putpmsg		solaris_unimplemented
+#define solaris_semsys		solaris_unimplemented
+
+        .data
+	.globl		solaris_sys_table
+solaris_sys_table:
+	.word solaris_unimplemented	/* nosys		0	*/
+	.word CHAIN(exit)		/* exit		d	1	*/
+	.word CHAIN(fork)		/* fork			2	*/
+	.word CHAIN(read)		/* read		dpd	3	*/
+	.word CHAIN(write)		/* write	dpd	4	*/
+	.word solaris_open		/* open		soo	5	*/
+	.word CHAIN(close)		/* close	d	6	*/
+	.word solaris_wait		/* wait		xxx	7	*/
+	.word CHAIN(creat)		/* creat	so	8	*/
+	.word CHAIN(link)		/* link		ss	9	*/
+	.word CHAIN(unlink)		/* unlink	s	10	*/
+	.word solaris_unimplemented	/* exec		sxx	11	*/
+	.word CHAIN(chdir)		/* chdir	s	12	*/
+	.word CHAIN(time)		/* time			13	*/
+	.word solaris_mknod		/* mknod	sox	14	*/
+	.word CHAIN(chmod)		/* chmod	so	15	*/ 
+	.word CHAIN(chown)		/* chown	sdd	16	*/
+	.word solaris_brk		/* brk/break	x	17	*/
+	.word solaris_stat		/* stat		sp	18	*/
+	.word CHAIN(lseek)		/* seek/lseek	ddd	19	*/
+	.word solaris_getpid		/* getpid		20	*/
+	.word solaris_unimplemented	/* mount		21	*/
+	.word CHAIN(umount)		/* umount	s	22	*/
+	.word CHAIN(setuid)		/* setuid	d	23	*/
+	.word solaris_getuid		/* getuid		24	*/
+	.word CHAIN(stime)		/* stime	d	25	*/
+#if 0
+	.word solaris_ptrace		/* ptrace	xdxx	26	*/
+#else
+	.word CHAIN(ptrace)		/* ptrace	xdxx	26	*/
+#endif
+	.word CHAIN(alarm)		/* alarm	d	27	*/
+	.word solaris_fstat		/* fstat	dp	28	*/
+	.word CHAIN(pause)		/* pause		29	*/
+	.word CHAIN(utime)		/* utime	xx	30	*/
+	.word solaris_unimplemented	/* stty			31	*/
+	.word solaris_unimplemented	/* gtty			32	*/
+	.word solaris_access		/* access	so	33	*/
+	.word CHAIN(nice)		/* nice		d	34	*/
+	.word solaris_statfs		/* statfs	spdd	35	*/
+	.word CHAIN(sync)		/* sync			36	*/
+	.word solaris_kill		/* kill		dd	37	*/
+	.word solaris_fstatfs		/* fstatfs	dpdd	38	*/
+	.word solaris_procids		/* pgrpsys	ddd	39	*/
+	.word solaris_unimplemented	/* xenix		40	*/
+	.word CHAIN(dup)		/* dup		d	41	*/
+	.word CHAIN(pipe)		/* pipe			42	*/
+	.word CHAIN(times)		/* times	p	43	*/
+	.word 44 /*CHAIN(profil)*/	/* prof		xxxx	44	*/
+	.word solaris_unimplemented	/* lock/plock		45	*/
+	.word CHAIN(setgid)		/* setgid	d	46	*/
+	.word solaris_getgid		/* getgid		47	*/
+	.word solaris_sigfunc		/* sigfunc	xx	48	*/
+	.word REGS(solaris_msgsys)	/* msgsys	dxddd	49	*/
+	.word solaris_unimplemented	/* syssun/3b		50	*/
+	.word CHAIN(acct)		/* acct/sysacct	x	51	*/
+	.word solaris_shmsys		/* shmsys	ddxo	52	*/
+	.word REGS(solaris_semsys)	/* semsys	dddx	53	*/
+	.word solaris_ioctl		/* ioctl	dxx	54	*/
+	.word solaris_unimplemented	/* uadmin	xxx	55	*/
+	.word solaris_unimplemented	/* reserved:exch	56	*/
+	.word solaris_utssys		/* utssys	x	57	*/
+	.word CHAIN(fsync)		/* fsync	d	58	*/
+	.word CHAIN(execve)		/* execv	spp	59	*/
+	.word CHAIN(umask)		/* umask	o	60	*/
+	.word CHAIN(chroot)		/* chroot	s	61	*/
+	.word solaris_fcntl		/* fcntl	dxx	62	*/
+	.word solaris_ulimit		/* ulimit	xx	63	*/
+	.word solaris_unimplemented	/* ?			64	*/
+	.word solaris_unimplemented	/* ?			65	*/
+	.word solaris_unimplemented	/* ?			66	*/
+	.word solaris_unimplemented	/* ?			67	*/
+	.word solaris_unimplemented	/* ?			68	*/
+	.word solaris_unimplemented	/* ?			69	*/
+	.word solaris_unimplemented	/* advfs		70	*/
+	.word solaris_unimplemented	/* unadvfs		71	*/
+	.word solaris_unimplemented	/* rmount		72	*/
+	.word solaris_unimplemented	/* rumount		73	*/
+	.word solaris_unimplemented	/* rfstart		74	*/
+	.word solaris_unimplemented	/* ?			75	*/
+	.word solaris_unimplemented	/* rdebug		76	*/
+	.word solaris_unimplemented	/* rfstop		77	*/
+	.word solaris_unimplemented	/* rfsys		78	*/
+	.word CHAIN(rmdir)		/* rmdir	s	79	*/
+	.word CHAIN(mkdir)		/* mkdir	so	80	*/
+	.word CHAIN(getdents)		/* getdents	dxd	81	*/
+	.word solaris_unimplemented	/* libattach		82	*/
+	.word solaris_unimplemented	/* libdetach		83	*/
+	.word CHAIN(sysfs)		/* sysfs	dxx	84	*/
+	.word solaris_getmsg		/* getmsg	dxxx	85	*/
+	.word solaris_putmsg		/* putmsg	dxxd	86	*/
+	.word CHAIN(poll)		/* poll		xdd	87	*/
+	.word solaris_lstat		/* lstat	sp	88	*/
+	.word CHAIN(symlink)		/* symlink	ss	89	*/
+	.word CHAIN(readlink)		/* readlink	spd	90	*/
+	.word CHAIN(setgroups)		/* setgroups	dp	91	*/
+	.word CHAIN(getgroups)		/* getgroups	dp	92	*/
+	.word CHAIN(fchmod)		/* fchmod	do	93	*/
+	.word CHAIN(fchown)		/* fchown	ddd	94	*/
+	.word solaris_sigprocmask	/* sigprocmask	dxx	95	*/
+	.word solaris_sigsuspend	/* sigsuspend	x	96	*/
+	.word solaris_sigaltstack	/* sigaltstack	xx	97	*/
+	.word solaris_sigaction		/* sigaction	dxx	98	*/
+	.word solaris_sigpending	/* sigpending	dd	99	*/
+	.word REGS(solaris_context)	/* context		100	*/
+	.word solaris_unimplemented	/* evsys		101	*/
+	.word solaris_unimplemented	/* evtrapret		102	*/
+	.word solaris_statvfs		/* statvfs	sp	103	*/
+	.word solaris_fstatvfs		/* fstatvfs	dp	104	*/
+	.word solaris_unimplemented	/* unknown		105	*/
+	.word solaris_unimplemented	/* nfssys		106	*/
+	.word solaris_waitid		/* waitid	ddxd	107	*/
+	.word solaris_unimplemented	/* sigsendsys	ddd	108	*/
+	.word REGS(solaris_hrtsys)	/* hrtsys	xxx	109	*/
+	.word solaris_unimplemented	/* acancel	dxd	110	*/
+	.word solaris_unimplemented	/* async		111	*/
+	.word solaris_unimplemented	/* priocntlsys		112	*/
+	.word solaris_pathconf		/* pathconf	sd	113	*/
+	.word CHAIN(mincore)		/* mincore	d	114	*/
+	.word solaris_mmap		/* mmap		xxxxdx	115	*/
+	.word CHAIN(mprotect)		/* mprotect	xdx	116	*/
+	.word CHAIN(munmap)		/* munmap	xd	117	*/
+	.word solaris_fpathconf		/* fpathconf	dd	118	*/
+	.word CHAIN(fork)		/* fork			119	*/
+	.word solaris_unimplemented	/* fchdir	d	120	*/
+	.word CHAIN(readv)		/* readv	dxd	121	*/
+	.word CHAIN(writev)		/* writev	dxd	122	*/
+	.word solaris_xstat		/* xstat	dsx	123	*/
+	.word solaris_lxstat		/* lxstat	dsx	124	*/
+	.word solaris_fxstat		/* fxstat	ddx	125	*/
+	.word solaris_xmknod		/* xmknod	dsox	126	*/
+	.word solaris_unimplemented	/* syslocal	d	127	*/
+	.word solaris_setrlimit		/* setrlimit	dp	128	*/
+	.word solaris_getrlimit		/* getrlimit	dp	129	*/
+	.word CHAIN(chown)		/* lchown	sdd	130	*/
+	.word solaris_unimplemented	/* memcntl		131	*/
+	.word solaris_getpmsg		/* getpmsg	dxxxx	132	*/
+	.word solaris_putpmsg		/* putpmsg	dxxdd	133	*/
+	.word CHAIN(rename)		/* rename	ss	134	*/
+	.word solaris_utsname		/* uname	x	135	*/
+	.word solaris_unimplemented	/* setegid		136	*/
+	.word solaris_sysconf		/* sysconfig	d	137	*/
+	.word solaris_unimplemented	/* adjtime		138	*/
+	.word solaris_sysinfo		/* systeminfo	dsd	139	*/
+	.word solaris_unimplemented	/* ?			140	*/
+	.word solaris_unimplemented	/* seteuid		141	*/
+	.word solaris_unimplemented	/* ?			142	*/
+	.word solaris_unimplemented	/* ?			143	*/
+	.word solaris_unimplemented	/* secsys	dx	144	*/
+	.word solaris_unimplemented	/* filepriv	sdxd	145	*/
+	.word solaris_unimplemented	/* procpriv	dxd	146	*/
+	.word solaris_unimplemented	/* devstat	sdx	147	*/
+	.word solaris_unimplemented	/* aclipc	ddddx	148	*/
+	.word solaris_unimplemented	/* fdevstat	ddx	149	*/
+	.word solaris_unimplemented	/* flvlfile	ddx	150	*/
+	.word solaris_unimplemented	/* lvlfile	sdx	151	*/
+	.word solaris_unimplemented	/* ?			152	*/
+	.word solaris_unimplemented	/* fchroot	d	153	*/
+	.word solaris_unimplemented	/* lvlproc	dx	154	*/
+	.word solaris_unimplemented	/* ?			155	*/
+	.word solaris_gettimeofday	/* gettimeofday	x	156	*/
+	.word CHAIN(getitimer)		/* getitimer	dx	157	*/
+	.word CHAIN(setitimer)		/* setitimer	dxx	158	*/
+	.word solaris_unimplemented	/* lwp-xxx		159	*/
+	.word solaris_unimplemented	/* lwp-xxx		160	*/
+	.word solaris_unimplemented	/* lwp-xxx		161	*/
+	.word solaris_unimplemented	/* lwp-xxx		162	*/
+	.word solaris_unimplemented	/* lwp-xxx		163	*/
+	.word solaris_unimplemented	/* lwp-xxx		164	*/
+	.word solaris_unimplemented	/* lwp-xxx		165	*/
+	.word solaris_unimplemented	/* lwp-xxx		166	*/
+	.word solaris_unimplemented	/* lwp-xxx		167	*/
+	.word solaris_unimplemented	/* lwp-xxx		168	*/
+	.word solaris_unimplemented	/* lwp-xxx		169	*/
+	.word solaris_unimplemented	/* lwp-xxx		170	*/
+	.word solaris_unimplemented	/* lwp-xxx		171	*/
+	.word solaris_unimplemented	/* lwp-xxx		172	*/
+	.word solaris_pread		/* pread	dpdd	173	*/
+	.word solaris_pwrite		/* pwrite	dpdd	174	*/
+	.word REGS(solaris_llseek)	/* llseek	dLd	175	*/
+	.word solaris_unimplemented	/* lwpself		176	*/
+	.word solaris_unimplemented	/* lwpinfo		177	*/
+	.word solaris_unimplemented	/* lwpprivate		178	*/
+	.word solaris_unimplemented	/* processorbind	179	*/
+	.word solaris_unimplemented	/* processorexbind	180	*/
+	.word solaris_unimplemented	/* 			181	*/
+	.word solaris_unimplemented	/* sync_mailbox		182	*/
+	.word solaris_unimplemented	/* prepblock		183	*/
+	.word solaris_unimplemented	/* block		184	*/
+	.word solaris_acl		/* acl		sddp	185	*/
+	.word solaris_unimplemented	/* unblock		186	*/
+	.word solaris_unimplemented	/* cancelblock		187	*/
+	.word solaris_unimplemented	/* ?			188	*/
+	.word solaris_unimplemented	/* xxxxx		189	*/
+	.word solaris_unimplemented	/* xxxxxe		190	*/
+	.word solaris_unimplemented	/*			191	*/
+	.word solaris_unimplemented	/*			192	*/
+	.word solaris_unimplemented	/*			193	*/
+	.word solaris_unimplemented	/*			194	*/
+	.word solaris_unimplemented	/* 			195	*/
+	.word solaris_unimplemented	/* 			196	*/
+	.word solaris_unimplemented	/* 			197	*/
+	.word solaris_unimplemented	/* 			198	*/
+	.word CHAIN(nanosleep)		/* nanosleep	dd	199	*/
+	.word solaris_facl		/* facl		dddp	200	*/
+	.word solaris_unimplemented	/* 			201	*/
+	.word CHAIN(setreuid)		/* setreuid	dd	202	*/
+	.word CHAIN(setregid)		/* setregid	dd	203	*/
+	.word solaris_unimplemented	/* 			204	*/
+	.word solaris_unimplemented	/* 			205	*/
+	.word solaris_unimplemented	/* 			206	*/
+	.word solaris_unimplemented	/* 			207	*/
+	.word solaris_unimplemented	/* 			208	*/
+	.word solaris_unimplemented	/* 			209	*/
+	.word solaris_unimplemented	/* 			210	*/
+	.word solaris_unimplemented	/* 			211	*/
+	.word solaris_unimplemented	/* 			212	*/
+	.word solaris_getdents64	/* getdents64	dpd	213	*/
+	.word REGS(solaris_mmap64)	/* mmap64	xxxxdX	214	*/
+	.word solaris_stat64		/* stat64	sP	215	*/
+	.word solaris_lstat64		/* lstat64	sP	216	*/
+	.word solaris_fstat64		/* fstat64	dP	217	*/
+	.word solaris_statvfs64		/* statvfs64	sP	218	*/
+	.word solaris_fstatvfs64	/* fstatvfs64	dP	219	*/
+	.word solaris_setrlimit64	/* setrlimit64	dP	220	*/
+	.word solaris_getrlimit64	/* getrlimit64	dP	221	*/
+	.word CHAIN(pread64)		/* pread64	dpdD	222	*/
+	.word CHAIN(pwrite64)		/* pwrite64	dpdD	223	*/
+	.word CHAIN(creat)		/* creat64	so	224	*/
+	.word solaris_open		/* open64	soo	225	*/
+	.word solaris_unimplemented	/* 			226	*/
+	.word solaris_unimplemented	/* 			227	*/
+	.word solaris_unimplemented	/* 			228	*/
+	.word solaris_unimplemented	/* 			229	*/
+	.word solaris_socket		/* socket	ddd	230	*/
+	.word solaris_socketpair	/* socketpair	dddp	231	*/
+	.word solaris_bind		/* bind		dpd	232	*/
+	.word solaris_listen		/* listen	dd	233	*/
+	.word solaris_accept		/* accept	dpp	234	*/
+	.word solaris_connect		/* connect	dpd	235	*/
+	.word solaris_shutdown		/* shutdown	dd	236	*/
+	.word solaris_recv		/* recv		dpdd	237	*/
+	.word solaris_recvfrom		/* recvfrom	dpddpp	238	*/
+	.word solaris_recvmsg		/* recvmsg	dpd	239	*/
+	.word solaris_send		/* send		dpdd	240	*/
+	.word solaris_sendmsg		/* sendmsg	dpd	241	*/
+	.word solaris_sendto		/* sendto	dpddpd	242	*/
+	.word solaris_getpeername	/* getpeername	dpp	243	*/
+	.word solaris_getsockname	/* getsockname	dpp	244	*/
+	.word solaris_getsockopt	/* getsockopt	dddpp	245	*/
+	.word solaris_setsockopt	/* setsockopt	dddpp	246	*/
+	.word solaris_unimplemented	/* 			247	*/
+	.word solaris_ntp_gettime	/* ntp_gettime	p	248	*/
+	.word solaris_ntp_adjtime	/* ntp_adjtime	p	249	*/
+	.word solaris_unimplemented	/* 			250	*/
+	.word solaris_unimplemented	/* 			251	*/
+	.word solaris_unimplemented	/* 			252	*/
+	.word solaris_unimplemented	/* 			253	*/
+	.word solaris_unimplemented	/* 			254	*/
+	.word solaris_unimplemented	/* 			255	*/
+	.word solaris_unimplemented	/* 			256	*/
+	.word solaris_unimplemented	/* 			257	*/
+	.word solaris_unimplemented	/* 			258	*/
+	.word solaris_unimplemented	/* 			259	*/
+	.word solaris_unimplemented	/* 			260	*/
+	.word solaris_unimplemented	/* 			261	*/
+	.word solaris_unimplemented	/* 			262	*/
+	.word solaris_unimplemented	/* 			263	*/
+	.word solaris_unimplemented	/* 			264	*/
+	.word solaris_unimplemented	/* 			265	*/
+	.word solaris_unimplemented	/* 			266	*/
+	.word solaris_unimplemented	/* 			267	*/
+	.word solaris_unimplemented	/* 			268	*/
+	.word solaris_unimplemented	/* 			269	*/
+	.word solaris_unimplemented	/* 			270	*/
+	.word solaris_unimplemented	/* 			271	*/
+	.word solaris_unimplemented	/* 			272	*/
+	.word solaris_unimplemented	/* 			273	*/
+	.word solaris_unimplemented	/* 			274	*/
+	.word solaris_unimplemented	/* 			275	*/
+	.word solaris_unimplemented	/* 			276	*/
+	.word solaris_unimplemented	/* 			277	*/
+	.word solaris_unimplemented	/* 			278	*/
+	.word solaris_unimplemented	/* 			279	*/
+	.word solaris_unimplemented	/* 			280	*/
+	.word solaris_unimplemented	/* 			281	*/
+	.word solaris_unimplemented	/* 			282	*/
+	.word solaris_unimplemented	/* 			283	*/
+
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c
new file mode 100644
index 0000000..022c80f
--- /dev/null
+++ b/arch/sparc64/solaris/timod.c
@@ -0,0 +1,959 @@
+/* $Id: timod.c,v 1.19 2002/02/08 03:57:14 davem Exp $
+ * timod.c: timod emulation.
+ *
+ * Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
+ *
+ * Streams & timod emulation based on code
+ * Copyright (C) 1995, 1996 Mike Jagdis (jaggy@purplet.demon.co.uk)
+ *
+ */
+ 
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+
+#include <net/sock.h>
+
+#include <asm/uaccess.h>
+#include <asm/termios.h>
+
+#include "conv.h"
+#include "socksys.h"
+
+asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
+
+static DEFINE_SPINLOCK(timod_pagelock);
+static char * page = NULL ;
+
+#ifndef DEBUG_SOLARIS_KMALLOC
+
+#define mykmalloc kmalloc
+#define mykfree kfree
+
+#else
+
+/*
+ * Debug bump allocator: hands out 64-byte-aligned chunks from a
+ * private page and never frees anything (mykfree below is a no-op),
+ * so stale pointers stay readable while chasing use-after-free bugs.
+ * Debug-only: no locking, leaks pages by design, and the
+ * __get_free_page() result is not checked.
+ */
+void * mykmalloc(size_t s, int gfp)
+{
+	static char * page;
+	static size_t free;	/* bytes left in the current page */
+	void * r;
+	s = ((s + 63) & ~63);	/* round up to a 64-byte granule */
+	if( s > PAGE_SIZE ) {
+		SOLD("too big size, calling real kmalloc");
+		return kmalloc(s, gfp);
+	}
+	if( s > free ) {
+		/* we are wasting memory, but we don't care */
+		page = (char *)__get_free_page(gfp);
+		free = PAGE_SIZE;
+	}
+	r = page;
+	page += s;
+	free -= s;
+	return r;
+}
+
+/* Intentionally empty: see mykmalloc() above. */
+void mykfree(void *p)
+{
+}
+
+#endif
+
+#ifndef DEBUG_SOLARIS
+
+#define BUF_SIZE	PAGE_SIZE
+#define PUT_MAGIC(a,m)
+#define SCHECK_MAGIC(a,m)
+#define BUF_OFFSET	0
+#define MKCTL_TRAILER	0
+
+#else
+
+/* In debug builds each buffer page keeps a u64 guard word at both
+ * ends (BUFPAGE_MAGIC) and each timod_mkctl() allocation gets a
+ * trailing guard (MKCTL_MAGIC).  PUT_MAGIC plants a guard,
+ * SCHECK_MAGIC reports corruption; BUF_SIZE shrinks accordingly and
+ * BUF_OFFSET skips the leading guard. */
+#define BUF_SIZE	(PAGE_SIZE-2*sizeof(u64))
+#define BUFPAGE_MAGIC	0xBADC0DEDDEADBABEL
+#define MKCTL_MAGIC	0xDEADBABEBADC0DEDL
+#define PUT_MAGIC(a,m)	do{(*(u64*)(a))=(m);}while(0)
+#define SCHECK_MAGIC(a,m)	do{if((*(u64*)(a))!=(m))printk("%s,%u,%s(): magic %08x at %p corrupted!\n",\
+				__FILE__,__LINE__,__FUNCTION__,(m),(a));}while(0)
+#define BUF_OFFSET	sizeof(u64)
+#define MKCTL_TRAILER	sizeof(u64)
+
+#endif
+
+/*
+ * Grab a scratch buffer page.  A single free page is cached in `page'
+ * (under timod_pagelock) to avoid hitting the page allocator on every
+ * call; the returned pointer is advanced by BUF_OFFSET past the debug
+ * guard word (see PUT_MAGIC above).
+ *
+ * NOTE(review): the __get_free_page() result is not checked -- on OOM
+ * this plants magics through a NULL page and callers dereference
+ * NULL+BUF_OFFSET.  Confirm callers can handle a NULL return before
+ * changing this.
+ */
+static char *getpage( void )
+{
+	char *r;
+	SOLD("getting page");
+	spin_lock(&timod_pagelock);
+	if (page) {
+		/* Steal the cached page and clear the cache slot. */
+		r = page;
+		page = NULL;
+		spin_unlock(&timod_pagelock);
+		SOLD("got cached");
+		return r + BUF_OFFSET;
+	}
+	spin_unlock(&timod_pagelock);
+	SOLD("getting new");
+	r = (char *)__get_free_page(GFP_KERNEL);
+	PUT_MAGIC(r,BUFPAGE_MAGIC);
+	PUT_MAGIC(r+PAGE_SIZE-sizeof(u64),BUFPAGE_MAGIC);
+	return r + BUF_OFFSET;
+}
+
+/*
+ * Return a buffer obtained from getpage().  Verifies both guard words
+ * in debug builds, then either re-stashes the page in the one-deep
+ * cache or frees it if the cache slot is already occupied.
+ */
+static void putpage(char *p)
+{
+	SOLD("putting page");
+	p = p - BUF_OFFSET;	/* back up to the real page start */
+	SCHECK_MAGIC(p,BUFPAGE_MAGIC);
+	SCHECK_MAGIC(p+PAGE_SIZE-sizeof(u64),BUFPAGE_MAGIC);
+	spin_lock(&timod_pagelock);
+	if (page) {
+		spin_unlock(&timod_pagelock);
+		free_page((unsigned long)p);
+		SOLD("freed it");
+	} else {
+		page = p;
+		spin_unlock(&timod_pagelock);
+		SOLD("cached it");
+	}
+}
+
+/*
+ * Allocate a queued control message with room for `size' bytes of TLI
+ * payload overlaid on the `type' field (plus guard space in debug
+ * builds).  Priority defaults to MSG_HIPRI; callers clear `pri' for
+ * ordinary-band messages.  Returns NULL on allocation failure.
+ */
+static struct T_primsg *timod_mkctl(int size)
+{
+	struct T_primsg *it;
+
+	SOLD("creating primsg");
+	it = (struct T_primsg *)mykmalloc(size+sizeof(*it)-sizeof(s32)+2*MKCTL_TRAILER, GFP_KERNEL);
+	if (it) {
+		SOLD("got it");
+		it->pri = MSG_HIPRI;
+		it->length = size;
+		/* Trailing guard at the 8-byte-aligned end of the payload. */
+		PUT_MAGIC((char*)((u64)(((char *)&it->type)+size+7)&~7),MKCTL_MAGIC);
+	}
+	return it;
+}
+
+/*
+ * Wake anything sleeping on the socket's wait queue and post SIGIO to
+ * async readers so a freshly queued control message is noticed.
+ *
+ * NOTE(review): indexes current->files->fd[fd] directly with no
+ * validation or locking, like the rest of this file -- assumes the
+ * caller passes a valid descriptor it owns.
+ */
+static void timod_wake_socket(unsigned int fd)
+{
+	struct socket *sock;
+
+	SOLD("wakeing socket");
+	sock = SOCKET_I(current->files->fd[fd]->f_dentry->d_inode);
+	wake_up_interruptible(&sock->wait);
+	read_lock(&sock->sk->sk_callback_lock);
+	if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+		__kill_fasync(sock->fasync_list, SIGIO, POLL_IN);
+	read_unlock(&sock->sk->sk_callback_lock);
+	SOLD("done");
+}
+
+/*
+ * Queue a control message at the HEAD of the socket's pending list
+ * (used for acks/errors that must be read first) and wake the socket.
+ */
+static void timod_queue(unsigned int fd, struct T_primsg *it)
+{
+	struct sol_socket_struct *sock;
+
+	SOLD("queuing primsg");
+	sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data;
+	it->next = sock->pfirst;
+	sock->pfirst = it;
+	if (!sock->plast)
+		sock->plast = it;	/* list was empty: head is also tail */
+	timod_wake_socket(fd);
+	SOLD("done");
+}
+
+/*
+ * Append a control message at the TAIL of the pending list.  Unlike
+ * timod_queue() this does not wake the socket; callers in this file
+ * pair it with a preceding timod_ok()/timod_error() that does.
+ */
+static void timod_queue_end(unsigned int fd, struct T_primsg *it)
+{
+	struct sol_socket_struct *sock;
+
+	SOLD("queuing primsg at end");
+	sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data;
+	it->next = NULL;
+	if (sock->plast)
+		sock->plast->next = it;
+	else
+		sock->pfirst = it;
+	sock->plast = it;
+	SOLD("done");
+}
+
+/*
+ * Build and queue a T_ERROR_ACK for primitive `prim' carrying TLI
+ * error `terr' and UNIX errno `uerr'.  The ack is silently dropped if
+ * the allocation fails.
+ */
+static void timod_error(unsigned int fd, int prim, int terr, int uerr)
+{
+	struct T_primsg *it;
+	
+	SOLD("making error");
+	it = timod_mkctl(sizeof(struct T_error_ack));
+	if (it) {
+		struct T_error_ack *err = (struct T_error_ack *)&it->type;
+		
+		SOLD("got it");
+		err->PRIM_type = T_ERROR_ACK;
+		err->ERROR_prim = prim;
+		err->TLI_error = terr;
+		err->UNIX_error = uerr; /* FIXME: convert this */
+		timod_queue(fd, it);
+	}
+	SOLD("done");
+}
+
+/*
+ * Build and queue a T_OK_ACK confirming primitive `prim'.  Silently
+ * dropped if the allocation fails.
+ */
+static void timod_ok(unsigned int fd, int prim)
+{
+	struct T_primsg *it;
+	struct T_ok_ack *ok;
+	
+	SOLD("creating ok ack");
+	it = timod_mkctl(sizeof(*ok));
+	if (it) {
+		SOLD("got it");
+		ok = (struct T_ok_ack *)&it->type;
+		ok->PRIM_type = T_OK_ACK;
+		ok->CORRECT_prim = prim;
+		timod_queue(fd, it);
+	}
+	SOLD("done");
+}
+
+/*
+ * Process a user buffer of (struct opthdr + value) option records for
+ * T_OPTMGMT_REQ.  With flag == T_NEGOTIATE each option is first set
+ * via SETSOCKOPT; in every case the current value is then read back
+ * via GETSOCKOPT into a scratch page.  When do_ret is set the results
+ * are queued to the caller as a T_OPTMGMT_ACK (or a T_ERROR_ACK if a
+ * record failed); the function itself always returns 0 and errors
+ * travel in-band.
+ *
+ * NOTE(review): opt->len is user-controlled (s32) and is only
+ * compared against opt_len before use; verify that negative values
+ * cannot slip through the mixed signed/unsigned comparisons below
+ * (e.g. `opt_len >= sizeof(...)' promotes opt_len to unsigned).
+ */
+static int timod_optmgmt(unsigned int fd, int flag, char __user *opt_buf, int opt_len, int do_ret)
+{
+	int error, failed;
+	int ret_space, ret_len;
+	long args[5];
+	char *ret_pos,*ret_buf;
+	int (*sys_socketcall)(int, unsigned long *) =
+		(int (*)(int, unsigned long *))SYS(socketcall);
+	mm_segment_t old_fs = get_fs();
+
+	SOLD("entry");
+	SOLDD(("fd %u flg %u buf %p len %u doret %u",fd,flag,opt_buf,opt_len,do_ret));
+	if (!do_ret && (!opt_buf || opt_len <= 0))
+		return 0;
+	SOLD("getting page");
+	ret_pos = ret_buf = getpage();
+	ret_space = BUF_SIZE;
+	ret_len = 0;
+	
+	error = failed = 0;
+	SOLD("looping");
+	/* Each iteration consumes one opthdr+value record from opt_buf
+	 * and emits the (possibly resized) record into the scratch page. */
+	while(opt_len >= sizeof(struct opthdr)) {
+		struct opthdr *opt;
+		int orig_opt_len; 
+		SOLD("loop start");
+		opt = (struct opthdr *)ret_pos; 
+		if (ret_space < sizeof(struct opthdr)) {
+			failed = TSYSERR;
+			break;
+		}
+		SOLD("getting opthdr");
+		if (copy_from_user(opt, opt_buf, sizeof(struct opthdr)) ||
+			opt->len > opt_len) {
+			failed = TBADOPT;
+			break;
+		}
+		SOLD("got opthdr");
+		if (flag == T_NEGOTIATE) {
+			char *buf;
+			
+			SOLD("handling T_NEGOTIATE");
+			buf = ret_pos + sizeof(struct opthdr);
+			if (ret_space < opt->len + sizeof(struct opthdr) ||
+				copy_from_user(buf, opt_buf+sizeof(struct opthdr), opt->len)) {
+				failed = TSYSERR;
+				break;
+			}
+			SOLD("got optdata");
+			args[0] = fd;
+			args[1] = opt->level;
+			args[2] = opt->name;
+			args[3] = (long)buf;
+			args[4] = opt->len;
+			SOLD("calling SETSOCKOPT");
+			/* args/buffers are kernel pointers here. */
+			set_fs(KERNEL_DS);
+			error = sys_socketcall(SYS_SETSOCKOPT, args);
+			set_fs(old_fs);
+			if (error) {
+				failed = TBADOPT;
+				break;
+			}
+			SOLD("SETSOCKOPT ok");
+		}
+		orig_opt_len = opt->len;
+		/* Repurpose opt->len as the space GETSOCKOPT may fill. */
+		opt->len = ret_space - sizeof(struct opthdr);
+		if (opt->len < 0) {
+			failed = TSYSERR;
+			break;
+		}
+		args[0] = fd;
+		args[1] = opt->level;
+		args[2] = opt->name;
+		args[3] = (long)(ret_pos+sizeof(struct opthdr));
+		args[4] = (long)&opt->len;
+		SOLD("calling GETSOCKOPT");
+		set_fs(KERNEL_DS);
+		error = sys_socketcall(SYS_GETSOCKOPT, args);
+		set_fs(old_fs);
+		if (error) {
+			failed = TBADOPT;
+			break;
+		}
+		SOLD("GETSOCKOPT ok");
+		/* Advance output by what GETSOCKOPT wrote, input by the
+		 * original record length. */
+		ret_space -= sizeof(struct opthdr) + opt->len;
+		ret_len += sizeof(struct opthdr) + opt->len;
+		ret_pos += sizeof(struct opthdr) + opt->len;
+		opt_len -= sizeof(struct opthdr) + orig_opt_len;
+		opt_buf += sizeof(struct opthdr) + orig_opt_len;
+		SOLD("loop end");
+	}
+	SOLD("loop done");
+	if (do_ret) {
+		SOLD("generating ret msg");
+		if (failed)
+			timod_error(fd, T_OPTMGMT_REQ, failed, -error);
+		else {
+			struct T_primsg *it;
+			it = timod_mkctl(sizeof(struct T_optmgmt_ack) + ret_len);
+			if (it) {
+				struct T_optmgmt_ack *ack =
+					(struct T_optmgmt_ack *)&it->type;
+				SOLD("got primsg");
+				ack->PRIM_type = T_OPTMGMT_ACK;
+				ack->OPT_length = ret_len;
+				ack->OPT_offset = sizeof(struct T_optmgmt_ack);
+				ack->MGMT_flags = (failed ? T_FAILURE : flag);
+				memcpy(((char*)ack)+sizeof(struct T_optmgmt_ack),
+					ret_buf, ret_len);
+				timod_queue(fd, it);
+			}
+		}
+	}
+	SOLDD(("put_page %p\n", ret_buf));
+	putpage(ret_buf);
+	SOLD("done");	
+	return 0;
+}
+
+/*
+ * timod_putmsg() - control-side handler for a Solaris putmsg() on an
+ * emulated STREAMS socket.
+ *
+ * @fd:       descriptor of the socket (looked up straight in
+ *            current->files->fd[] like the rest of this file; assumed
+ *            valid and owned by the caller)
+ * @ctl_buf:  user pointer to the TLI control message; its first s32
+ *            is the primitive type
+ * @ctl_len:  length of the control part in bytes
+ * @data_buf: user pointer to the data part (used by T_UNITDATA_REQ)
+ * @data_len: length of the data part
+ * @flags:    putmsg flags (currently ignored here)
+ *
+ * Translates T_BIND_REQ, T_CONN_REQ, T_OPTMGMT_REQ and T_UNITDATA_REQ
+ * into the corresponding Linux socket calls and queues the TLI
+ * acknowledgement/indication messages that Solaris libnsl later reads
+ * back via getmsg().  Most failures are reported in-band through
+ * timod_error() and still return 0; -EFAULT/-ENOMEM/-EINVAL are
+ * returned only for errors the emulation cannot express as an ack.
+ */
+int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
+			char __user *data_buf, int data_len, int flags)
+{
+	int ret, error, terror;
+	char *buf;
+	struct file *filp;
+	struct sol_socket_struct *sock;
+	mm_segment_t old_fs = get_fs();
+	long args[6];
+	int (*sys_socketcall)(int, unsigned long __user *) =
+		(int (*)(int, unsigned long __user *))SYS(socketcall);
+	int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int) =
+		(int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int))SYS(sendto);
+
+	filp = current->files->fd[fd];
+	sock = (struct sol_socket_struct *)filp->private_data;
+	SOLD("entry");
+	/* Primitive type is the first 32-bit word of the control part. */
+	if (get_user(ret, (int __user *)A(ctl_buf)))
+		return -EFAULT;
+	switch (ret) {
+	case T_BIND_REQ:
+	{
+		struct T_bind_req req;
+
+		SOLDD(("bind %016lx(%016lx)\n", sock, filp));
+		SOLD("T_BIND_REQ");
+		if (sock->state != TS_UNBND) {
+			timod_error(fd, T_BIND_REQ, TOUTSTATE, 0);
+			return 0;
+		}
+		SOLD("state ok");
+		if (copy_from_user(&req, ctl_buf, sizeof(req))) {
+			timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
+			return 0;
+		}
+		SOLD("got ctl req");
+		if (req.ADDR_offset && req.ADDR_length) {
+			if (req.ADDR_length > BUF_SIZE) {
+				timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
+				return 0;
+			}
+			SOLD("req size ok");
+			buf = getpage();
+			if (copy_from_user(buf, ctl_buf + req.ADDR_offset, req.ADDR_length)) {
+				timod_error(fd, T_BIND_REQ, TSYSERR, EFAULT);
+				putpage(buf);
+				return 0;
+			}
+			SOLD("got ctl data");
+			args[0] = fd;
+			args[1] = (long)buf;
+			args[2] = req.ADDR_length;
+			SOLD("calling BIND");
+			/* Buffer is a kernel pointer; widen the addr limit. */
+			set_fs(KERNEL_DS);
+			error = sys_socketcall(SYS_BIND, args);
+			set_fs(old_fs);
+			putpage(buf);
+			SOLD("BIND returned");
+		} else
+			error = 0;
+		if (!error) {
+			struct T_primsg *it;
+			if (req.CONIND_number) {
+				args[0] = fd;
+				args[1] = req.CONIND_number;
+				SOLD("calling LISTEN");
+				set_fs(KERNEL_DS);
+				error = sys_socketcall(SYS_LISTEN, args);
+				set_fs(old_fs);
+				SOLD("LISTEN done");
+			}
+			it = timod_mkctl(sizeof(struct T_bind_ack)+sizeof(struct sockaddr));
+			if (it) {
+				struct T_bind_ack *ack;
+
+				ack = (struct T_bind_ack *)&it->type;
+				ack->PRIM_type = T_BIND_ACK;
+				ack->ADDR_offset = sizeof(*ack);
+				ack->ADDR_length = sizeof(struct sockaddr);
+				ack->CONIND_number = req.CONIND_number;
+				args[0] = fd;
+				/*
+				 * The bound sockname is stored immediately
+				 * after the ack header, as advertised by
+				 * ADDR_offset.  (This used to read
+				 * `ack+sizeof(*ack)', which scales by
+				 * sizeof(struct T_bind_ack) and made
+				 * getsockname() write far outside the
+				 * allocation.)
+				 */
+				args[1] = (long)(ack + 1);
+				args[2] = (long)&ack->ADDR_length;
+				set_fs(KERNEL_DS);
+				sys_socketcall(SYS_GETSOCKNAME,args);
+				set_fs(old_fs);
+				sock->state = TS_IDLE;
+				timod_ok(fd, T_BIND_REQ);
+				timod_queue_end(fd, it);
+				SOLD("BIND done");
+				return 0;
+			}
+		}
+		SOLD("some error");
+		/* Map the Linux errno onto a TLI error code for the ack. */
+		switch (error) {
+			case -EINVAL:
+				terror = TOUTSTATE;
+				error = 0;
+				break;
+			case -EACCES:
+				terror = TACCES;
+				error = 0;
+				break;
+			case -EADDRNOTAVAIL:
+			case -EADDRINUSE:
+				terror = TNOADDR;
+				error = 0;
+				break;
+			default:
+				terror = TSYSERR;
+				break;
+		}
+		timod_error(fd, T_BIND_REQ, terror, -error);
+		SOLD("BIND done");
+		return 0;
+	}
+	case T_CONN_REQ:
+	{
+		struct T_conn_req req;
+		unsigned short oldflags;
+		struct T_primsg *it;
+		SOLD("T_CONN_REQ");
+		if (sock->state != TS_UNBND && sock->state != TS_IDLE) {
+			timod_error(fd, T_CONN_REQ, TOUTSTATE, 0);
+			return 0;
+		}
+		SOLD("state ok");
+		if (copy_from_user(&req, ctl_buf, sizeof(req))) {
+			timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
+			return 0;
+		}
+		SOLD("got ctl req");
+		if (ctl_len > BUF_SIZE) {
+			timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
+			return 0;
+		}
+		SOLD("req size ok");
+		buf = getpage();
+		if (copy_from_user(buf, ctl_buf, ctl_len)) {
+			timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
+			putpage(buf);
+			return 0;
+		}
+#ifdef DEBUG_SOLARIS
+		{
+			char * ptr = buf;
+			int len = ctl_len;
+			printk("returned data (%d bytes): ",len);
+			while( len-- ) {
+				if (!(len & 7))
+					printk(" ");
+				printk("%02x",(unsigned char)*ptr++);
+			}
+			printk("\n");
+		}
+#endif
+		SOLD("got ctl data");
+		args[0] = fd;
+		args[1] = (long)buf+req.DEST_offset;
+		args[2] = req.DEST_length;
+		/* Force a blocking connect regardless of O_NONBLOCK. */
+		oldflags = filp->f_flags;
+		filp->f_flags &= ~O_NONBLOCK;
+		SOLD("calling CONNECT");
+		set_fs(KERNEL_DS);
+		error = sys_socketcall(SYS_CONNECT, args);
+		set_fs(old_fs);
+		filp->f_flags = oldflags;
+		SOLD("CONNECT done");
+		if (!error) {
+			struct T_conn_con *con;
+			SOLD("no error");
+			/* Echo the request back as a T_CONN_CON. */
+			it = timod_mkctl(ctl_len);
+			if (!it) {
+				putpage(buf);
+				return -ENOMEM;
+			}
+			con = (struct T_conn_con *)&it->type;
+#ifdef DEBUG_SOLARIS
+			{
+				char * ptr = buf;
+				int len = ctl_len;
+				printk("returned data (%d bytes): ",len);
+				while( len-- ) {
+					if (!(len & 7))
+						printk(" ");
+					printk("%02x",(unsigned char)*ptr++);
+				}
+				printk("\n");
+			}
+#endif
+			memcpy(con, buf, ctl_len);
+			SOLD("copied ctl_buf");
+			con->PRIM_type = T_CONN_CON;
+			sock->state = TS_DATA_XFER;
+		} else {
+			struct T_discon_ind *dis;
+			SOLD("some error");
+			it = timod_mkctl(sizeof(*dis));
+			if (!it) {
+				putpage(buf);
+				return -ENOMEM;
+			}
+			SOLD("got primsg");
+			dis = (struct T_discon_ind *)&it->type;
+			dis->PRIM_type = T_DISCON_IND;
+			dis->DISCON_reason = -error;	/* FIXME: convert this as in iABI_errors() */
+			dis->SEQ_number = 0;
+		}
+		putpage(buf);
+		timod_ok(fd, T_CONN_REQ);
+		it->pri = 0;	/* ordinary band, delivered after the ok-ack */
+		timod_queue_end(fd, it);
+		SOLD("CONNECT done");
+		return 0;
+	}
+	case T_OPTMGMT_REQ:
+	{
+		struct T_optmgmt_req req;
+		SOLD("OPTMGMT_REQ");
+		if (copy_from_user(&req, ctl_buf, sizeof(req)))
+			return -EFAULT;
+		SOLD("got req");
+		return timod_optmgmt(fd, req.MGMT_flags,
+				req.OPT_offset > 0 ? ctl_buf + req.OPT_offset : NULL,
+				req.OPT_length, 1);
+	}
+	case T_UNITDATA_REQ:
+	{
+		struct T_unitdata_req req;
+
+		int err;
+		SOLD("T_UNITDATA_REQ");
+		/* NOTE(review): the error acks below name T_CONN_REQ as the
+		 * failing primitive; T_UNITDATA_REQ looks intended -- confirm
+		 * against Solaris libnsl before changing the wire value. */
+		if (sock->state != TS_IDLE && sock->state != TS_DATA_XFER) {
+			timod_error(fd, T_CONN_REQ, TOUTSTATE, 0);
+			return 0;
+		}
+		SOLD("state ok");
+		if (copy_from_user(&req, ctl_buf, sizeof(req))) {
+			timod_error(fd, T_CONN_REQ, TSYSERR, EFAULT);
+			return 0;
+		}
+		SOLD("got ctl req");
+#ifdef DEBUG_SOLARIS
+		{
+			char * ptr = ctl_buf+req.DEST_offset;
+			int len = req.DEST_length;
+			printk("socket address (%d bytes): ",len);
+			while( len-- ) {
+				char c;
+				if (get_user(c,ptr))
+					printk("??");
+				else
+					printk("%02x",(unsigned char)c);
+				ptr++;
+			}
+			printk("\n");
+		}
+#endif
+		err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr __user *)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
+		if (err == data_len)
+			return 0;
+		if(err >= 0) {
+			/* Partial datagram send: nothing sane to report. */
+			printk("timod: sendto failed to send all the data\n");
+			return 0;
+		}
+		timod_error(fd, T_CONN_REQ, TSYSERR, -err);
+		return 0;
+	}
+	default:
+		printk(KERN_INFO "timod_putmsg: unsupported command %d.\n", ret);
+		break;
+	}
+	return -EINVAL;
+}
+
+int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, s32 __user *ctl_len,
+			char __user *data_buf, int data_maxlen, s32 __user *data_len, int *flags_p)
+{
+	int error;
+	int oldflags;
+	struct file *filp;
+	struct inode *ino;
+	struct sol_socket_struct *sock;
+	struct T_unitdata_ind udi;
+	mm_segment_t old_fs = get_fs();
+	long args[6];
+	char __user *tmpbuf;
+	int tmplen;
+	int (*sys_socketcall)(int, unsigned long __user *) =
+		(int (*)(int, unsigned long __user *))SYS(socketcall);
+	int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *);
+	
+	SOLD("entry");
+	SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
+	filp = current->files->fd[fd];
+	ino = filp->f_dentry->d_inode;
+	sock = (struct sol_socket_struct *)filp->private_data;
+	SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL));
+	if ( ctl_maxlen > 0 && !sock->pfirst && SOCKET_I(ino)->type == SOCK_STREAM
+		&& sock->state == TS_IDLE) {
+		SOLD("calling LISTEN");
+		args[0] = fd;
+		args[1] = -1;
+		set_fs(KERNEL_DS);
+		sys_socketcall(SYS_LISTEN, args);
+		set_fs(old_fs);
+		SOLD("LISTEN done");
+	}
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		struct poll_wqueues wait_table;
+		poll_table *wait;
+
+		poll_initwait(&wait_table);
+		wait = &wait_table.pt;
+		for(;;) {
+			SOLD("loop");
+			set_current_state(TASK_INTERRUPTIBLE);
+			/* ! ( l<0 || ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
+			/* ( ! l<0 && ! ( l>=0 && ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
+			/* ( l>=0 && ( ! l>=0 || ! ( ! pfirst || (flags == HIPRI && pri != HIPRI) ) ) ) */ 
+			/* ( l>=0 && ( l<0 || ( pfirst && ! (flags == HIPRI && pri != HIPRI) ) ) ) */ 
+			/* ( l>=0 && ( l<0 || ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) ) */ 
+			/* ( l>=0 && ( pfirst && (flags != HIPRI || pri == HIPRI) ) ) */ 
+			if (ctl_maxlen >= 0 && sock->pfirst && (*flags_p != MSG_HIPRI || sock->pfirst->pri == MSG_HIPRI))
+				break;
+			SOLD("cond 1 passed");
+			if (
+			#if 1
+				*flags_p != MSG_HIPRI &&
+			#endif
+				((filp->f_op->poll(filp, wait) & POLLIN) ||
+				(filp->f_op->poll(filp, NULL) & POLLIN) ||
+				signal_pending(current))
+			) {
+				break;
+			}
+			if( *flags_p == MSG_HIPRI ) {
+				SOLD("avoiding lockup");
+				break ;
+			}
+			if(wait_table.error) {
+				SOLD("wait-table error");
+				poll_freewait(&wait_table);
+				return wait_table.error;
+			}
+			SOLD("scheduling");
+			schedule();
+		}
+		SOLD("loop done");
+		current->state = TASK_RUNNING;
+		poll_freewait(&wait_table);
+		if (signal_pending(current)) {
+			SOLD("signal pending");
+			return -EINTR;
+		}
+	}
+	if (ctl_maxlen >= 0 && sock->pfirst) {
+		struct T_primsg *it = sock->pfirst;
+		int l = min_t(int, ctl_maxlen, it->length);
+		SCHECK_MAGIC((char*)((u64)(((char *)&it->type)+sock->offset+it->length+7)&~7),MKCTL_MAGIC);
+		SOLD("purting ctl data");
+		if(copy_to_user(ctl_buf,
+			(char*)&it->type + sock->offset, l))
+			return -EFAULT;
+		SOLD("pur it");
+		if(put_user(l, ctl_len))
+			return -EFAULT;
+		SOLD("set ctl_len");
+		*flags_p = it->pri;
+		it->length -= l;
+		if (it->length) {
+			SOLD("more ctl");
+			sock->offset += l;
+			return MORECTL;
+		} else {
+			SOLD("removing message");
+			sock->pfirst = it->next;
+			if (!sock->pfirst)
+				sock->plast = NULL;
+			SOLDD(("getmsg kfree %016lx->%016lx\n", it, sock->pfirst));
+			mykfree(it);
+			sock->offset = 0;
+			SOLD("ctl done");
+			return 0;
+		}
+	}
+	*flags_p = 0;
+	if (ctl_maxlen >= 0) {
+		SOLD("ACCEPT perhaps?");
+		if (SOCKET_I(ino)->type == SOCK_STREAM && sock->state == TS_IDLE) {
+			struct T_conn_ind ind;
+			char *buf = getpage();
+			int len = BUF_SIZE;
+
+			SOLD("trying ACCEPT");
+			if (put_user(ctl_maxlen - sizeof(ind), ctl_len))
+				return -EFAULT;
+			args[0] = fd;
+			args[1] = (long)buf;
+			args[2] = (long)&len;
+			oldflags = filp->f_flags;
+			filp->f_flags |= O_NONBLOCK;
+			SOLD("calling ACCEPT");
+			set_fs(KERNEL_DS);
+			error = sys_socketcall(SYS_ACCEPT, args);
+			set_fs(old_fs);
+			filp->f_flags = oldflags;
+			if (error < 0) {
+				SOLD("some error");
+				putpage(buf);
+				return error;
+			}
+			if (error) {
+				SOLD("connect");
+				putpage(buf);
+				if (sizeof(ind) > ctl_maxlen) {
+					SOLD("generating CONN_IND");
+					ind.PRIM_type = T_CONN_IND;
+					ind.SRC_length = len;
+					ind.SRC_offset = sizeof(ind);
+					ind.OPT_length = ind.OPT_offset = 0;
+					ind.SEQ_number = error;
+					if(copy_to_user(ctl_buf, &ind, sizeof(ind))||
+					   put_user(sizeof(ind)+ind.SRC_length,ctl_len))
+						return -EFAULT;
+					SOLD("CONN_IND created");
+				}
+				if (data_maxlen >= 0)
+					put_user(0, data_len);
+				SOLD("CONN_IND done");
+				return 0;
+			}
+			if (len>ctl_maxlen) {
+				SOLD("data don't fit");
+				putpage(buf);
+				return -EFAULT;		/* XXX - is this ok ? */
+			}
+			if(copy_to_user(ctl_buf,buf,len) || put_user(len,ctl_len)){
+				SOLD("can't copy data");
+				putpage(buf);
+				return -EFAULT;
+			}
+			SOLD("ACCEPT done");
+			putpage(buf);
+		}
+	}
+	SOLD("checking data req");
+	if (data_maxlen <= 0) {
+		if (data_maxlen == 0)
+			put_user(0, data_len);
+		if (ctl_maxlen >= 0)
+			put_user(0, ctl_len);
+		return -EAGAIN;
+	}
+	SOLD("wants data");
+	if (ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
+		SOLD("udi fits");
+		tmpbuf = ctl_buf + sizeof(udi);
+		tmplen = ctl_maxlen - sizeof(udi);
+	} else {
+		SOLD("udi does not fit");
+		tmpbuf = NULL;
+		tmplen = 0;
+	}
+	if (put_user(tmplen, ctl_len))
+		return -EFAULT;
+	SOLD("set ctl_len");
+	oldflags = filp->f_flags;
+	filp->f_flags |= O_NONBLOCK;
+	SOLD("calling recvfrom");
+	sys_recvfrom = (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+	error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr __user *)tmpbuf, ctl_len);
+	filp->f_flags = oldflags;
+	if (error < 0)
+		return error;
+	SOLD("error >= 0" ) ;
+	if (error && ctl_maxlen > sizeof(udi) && sock->state == TS_IDLE) {
+		SOLD("generating udi");
+		udi.PRIM_type = T_UNITDATA_IND;
+		if (get_user(udi.SRC_length, ctl_len))
+			return -EFAULT;
+		udi.SRC_offset = sizeof(udi);
+		udi.OPT_length = udi.OPT_offset = 0;
+		if (copy_to_user(ctl_buf, &udi, sizeof(udi)) ||
+		    put_user(sizeof(udi)+udi.SRC_length, ctl_len))
+			return -EFAULT;
+		SOLD("udi done");
+	} else {
+		if (put_user(0, ctl_len))
+			return -EFAULT;
+	}
+	put_user(error, data_len);
+	SOLD("done");
+	return 0;
+}
+
+asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
+{
+	struct file *filp;
+	struct inode *ino;
+	struct strbuf __user *ctlptr;
+	struct strbuf __user *datptr;
+	struct strbuf ctl, dat;
+	int __user *flgptr;
+	int flags;
+	int error = -EBADF;
+
+	SOLD("entry");
+	lock_kernel();
+	if(fd >= NR_OPEN) goto out;
+
+	filp = current->files->fd[fd];
+	if(!filp) goto out;
+
+	ino = filp->f_dentry->d_inode;
+	if (!ino || !S_ISSOCK(ino->i_mode))
+		goto out;
+
+	ctlptr = (struct strbuf __user *)A(arg1);
+	datptr = (struct strbuf __user *)A(arg2);
+	flgptr = (int __user *)A(arg3);
+
+	error = -EFAULT;
+
+	if (ctlptr) {
+		if (copy_from_user(&ctl,ctlptr,sizeof(struct strbuf)) || 
+		    put_user(-1,&ctlptr->len))
+			goto out;
+	} else
+		ctl.maxlen = -1;
+
+	if (datptr) {
+		if (copy_from_user(&dat,datptr,sizeof(struct strbuf)) || 
+		    put_user(-1,&datptr->len))
+			goto out;
+	} else
+		dat.maxlen = -1;
+
+	if (get_user(flags,flgptr))
+		goto out;
+
+	switch (flags) {
+	case 0:
+	case MSG_HIPRI:
+	case MSG_ANY:
+	case MSG_BAND:
+		break;
+	default:
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = timod_getmsg(fd,A(ctl.buf),ctl.maxlen,&ctlptr->len,
+				A(dat.buf),dat.maxlen,&datptr->len,&flags);
+
+	if (!error && put_user(flags,flgptr))
+		error = -EFAULT;
+out:
+	unlock_kernel();
+	SOLD("done");
+	return error;
+}
+
+asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
+{
+	struct file *filp;
+	struct inode *ino;
+	struct strbuf __user *ctlptr;
+	struct strbuf __user *datptr;
+	struct strbuf ctl, dat;
+	int flags = (int) arg3;
+	int error = -EBADF;
+
+	SOLD("entry");
+	lock_kernel();
+	if(fd >= NR_OPEN) goto out;
+
+	filp = current->files->fd[fd];
+	if(!filp) goto out;
+
+	ino = filp->f_dentry->d_inode;
+	if (!ino) goto out;
+
+	if (!S_ISSOCK(ino->i_mode) &&
+		(imajor(ino) != 30 || iminor(ino) != 1))
+		goto out;
+
+	ctlptr = A(arg1);
+	datptr = A(arg2);
+
+	error = -EFAULT;
+
+	if (ctlptr) {
+		if (copy_from_user(&ctl,ctlptr,sizeof(ctl)))
+			goto out;
+		if (ctl.len < 0 && flags) {
+			error = -EINVAL;
+			goto out;
+		}
+	} else {
+		ctl.len = 0;
+		ctl.buf = 0;
+	}
+
+	if (datptr) {
+		if (copy_from_user(&dat,datptr,sizeof(dat)))
+			goto out;
+	} else {
+		dat.len = 0;
+		dat.buf = 0;
+	}
+
+	error = timod_putmsg(fd,A(ctl.buf),ctl.len,
+				A(dat.buf),dat.len,flags);
+out:
+	unlock_kernel();
+	SOLD("done");
+	return error;
+}